author    Luca Boccassi <luca.boccassi@gmail.com>    2018-02-19 11:16:57 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>    2018-02-19 11:17:28 +0000
commit    ca33590b6af032bff57d9cc70455660466a654b2 (patch)
tree      0b68b090bd9b4a78a3614b62400b29279d76d553 /drivers
parent    169a9de21e263aa6599cdc2d87a45ae158d9f509 (diff)

New upstream version 18.02 (upstream/18.02)

Change-Id: I89ed24cb2a49b78fe5be6970b99dd46c1499fcc3
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 36
-rw-r--r--  drivers/bbdev/Makefile | 14
-rw-r--r--  drivers/bbdev/null/Makefile | 25
-rw-r--r--  drivers/bbdev/null/bbdev_null.c | 356
-rw-r--r--  drivers/bbdev/null/rte_pmd_bbdev_null_version.map | 3
-rw-r--r--  drivers/bbdev/turbo_sw/Makefile | 42
-rw-r--r--  drivers/bbdev/turbo_sw/bbdev_turbo_software.c | 1217
-rw-r--r--  drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map | 3
-rw-r--r--  drivers/bus/Makefile | 31
-rw-r--r--  drivers/bus/dpaa/Makefile | 32
-rw-r--r--  drivers/bus/dpaa/base/fman/fman.c | 39
-rw-r--r--  drivers/bus/dpaa/base/fman/fman_hw.c | 27
-rw-r--r--  drivers/bus/dpaa/base/fman/netcfg_layer.c | 53
-rw-r--r--  drivers/bus/dpaa/base/fman/of.c | 37
-rw-r--r--  drivers/bus/dpaa/base/qbman/bman.c | 37
-rw-r--r--  drivers/bus/dpaa/base/qbman/bman.h | 69
-rw-r--r--  drivers/bus/dpaa/base/qbman/bman_driver.c | 37
-rw-r--r--  drivers/bus/dpaa/base/qbman/bman_priv.h | 37
-rw-r--r--  drivers/bus/dpaa/base/qbman/dpaa_alloc.c | 37
-rw-r--r--  drivers/bus/dpaa/base/qbman/dpaa_sys.c | 37
-rw-r--r--  drivers/bus/dpaa/base/qbman/dpaa_sys.h | 37
-rw-r--r--  drivers/bus/dpaa/base/qbman/process.c | 37
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman.c | 362
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman.h | 105
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman_driver.c | 190
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman_priv.h | 43
-rw-r--r--  drivers/bus/dpaa/dpaa_bus.c | 153
-rw-r--r--  drivers/bus/dpaa/include/compat.h | 35
-rw-r--r--  drivers/bus/dpaa/include/dpaa_bits.h | 30
-rw-r--r--  drivers/bus/dpaa/include/dpaa_list.h | 30
-rw-r--r--  drivers/bus/dpaa/include/dpaa_rbtree.h | 30
-rw-r--r--  drivers/bus/dpaa/include/fman.h | 37
-rw-r--r--  drivers/bus/dpaa/include/fsl_bman.h | 35
-rw-r--r--  drivers/bus/dpaa/include/fsl_fman.h | 37
-rw-r--r--  drivers/bus/dpaa/include/fsl_fman_crc64.h | 35
-rw-r--r--  drivers/bus/dpaa/include/fsl_qman.h | 137
-rw-r--r--  drivers/bus/dpaa/include/fsl_usd.h | 39
-rw-r--r--  drivers/bus/dpaa/include/netcfg.h | 35
-rw-r--r--  drivers/bus/dpaa/include/of.h | 37
-rw-r--r--  drivers/bus/dpaa/include/process.h | 46
-rw-r--r--  drivers/bus/dpaa/rte_bus_dpaa_version.map | 28
-rw-r--r--  drivers/bus/dpaa/rte_dpaa_bus.h | 69
-rw-r--r--  drivers/bus/dpaa/rte_dpaa_logs.h | 46
-rw-r--r--  drivers/bus/fslmc/Makefile | 34
-rw-r--r--  drivers/bus/fslmc/fslmc_bus.c | 92
-rw-r--r--  drivers/bus/fslmc/fslmc_logs.h | 30
-rw-r--r--  drivers/bus/fslmc/fslmc_vfio.c | 41
-rw-r--r--  drivers/bus/fslmc/fslmc_vfio.h | 30
-rw-r--r--  drivers/bus/fslmc/mc/dpbp.c | 37
-rw-r--r--  drivers/bus/fslmc/mc/dpci.c | 35
-rw-r--r--  drivers/bus/fslmc/mc/dpcon.c | 30
-rw-r--r--  drivers/bus/fslmc/mc/dpio.c | 37
-rw-r--r--  drivers/bus/fslmc/mc/dpmng.c | 37
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpbp.h | 37
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpbp_cmd.h | 37
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpci.h | 35
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpci_cmd.h | 35
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpcon.h | 35
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpcon_cmd.h | 37
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpio.h | 37
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpio_cmd.h | 37
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpmng.h | 37
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpmng_cmd.h | 37
-rw-r--r--  drivers/bus/fslmc/mc/fsl_mc_cmd.h | 37
-rw-r--r--  drivers/bus/fslmc/mc/fsl_mc_sys.h | 38
-rw-r--r--  drivers/bus/fslmc/mc/mc_sys.c | 37
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c | 39
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpci.c | 32
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 78
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpio.h | 33
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 113
-rw-r--r--  drivers/bus/fslmc/qbman/include/compat.h | 26
-rw-r--r--  drivers/bus/fslmc/qbman/include/fsl_qbman_base.h | 24
-rw-r--r--  drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h | 30
-rw-r--r--  drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h | 42
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_debug.c | 66
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_portal.c | 49
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_portal.h | 24
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_sys.h | 26
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_sys_decl.h | 24
-rw-r--r--  drivers/bus/fslmc/rte_bus_fslmc_version.map | 16
-rw-r--r--  drivers/bus/fslmc/rte_fslmc.h | 81
-rw-r--r--  drivers/bus/meson.build | 7
-rw-r--r--  drivers/bus/pci/bsd/pci.c | 33
-rw-r--r--  drivers/bus/pci/linux/pci.c | 124
-rw-r--r--  drivers/bus/pci/linux/pci_init.h | 53
-rw-r--r--  drivers/bus/pci/linux/pci_uio.c | 33
-rw-r--r--  drivers/bus/pci/linux/pci_vfio.c | 35
-rw-r--r--  drivers/bus/pci/meson.build | 16
-rw-r--r--  drivers/bus/pci/pci_common.c | 35
-rw-r--r--  drivers/bus/pci/pci_common_uio.c | 34
-rw-r--r--  drivers/bus/pci/private.h | 33
-rw-r--r--  drivers/bus/pci/rte_bus_pci.h | 35
-rw-r--r--  drivers/bus/vdev/Makefile | 32
-rw-r--r--  drivers/bus/vdev/meson.build | 5
-rw-r--r--  drivers/bus/vdev/rte_bus_vdev.h | 35
-rw-r--r--  drivers/bus/vdev/rte_bus_vdev_version.map | 8
-rw-r--r--  drivers/bus/vdev/vdev.c | 80
-rw-r--r--  drivers/bus/vdev/vdev_logs.h | 33
-rw-r--r--  drivers/crypto/Makefile | 32
-rw-r--r--  drivers/crypto/aesni_gcm/Makefile | 31
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 32
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 32
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c | 32
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h | 34
-rw-r--r--  drivers/crypto/aesni_mb/Makefile | 31
-rw-r--r--  drivers/crypto/aesni_mb/aesni_mb_ops.h | 32
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 177
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 65
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 44
-rw-r--r--  drivers/crypto/armv8/Makefile | 32
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd.c | 32
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd_ops.c | 32
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd_private.h | 34
-rw-r--r--  drivers/crypto/dpaa2_sec/Makefile | 33
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 622
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h | 30
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 31
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/compat.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc/algo.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc/common.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc/ipsec.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta.h | 39
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h | 41
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/dpseci.c | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h | 37
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h | 37
-rw-r--r--  drivers/crypto/dpaa_sec/Makefile | 34
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.c | 1287
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.h | 170
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec_log.h | 28
-rw-r--r--  drivers/crypto/kasumi/Makefile | 31
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd.c | 32
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd_ops.c | 32
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd_private.h | 34
-rw-r--r--  drivers/crypto/meson.build | 7
-rw-r--r--  drivers/crypto/mrvl/Makefile | 5
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_compat.h | 1
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/null/Makefile | 31
-rw-r--r--  drivers/crypto/null/meson.build | 6
-rw-r--r--  drivers/crypto/null/null_crypto_pmd.c | 32
-rw-r--r--  drivers/crypto/null/null_crypto_pmd_ops.c | 32
-rw-r--r--  drivers/crypto/null/null_crypto_pmd_private.h | 34
-rw-r--r--  drivers/crypto/openssl/Makefile | 31
-rw-r--r--  drivers/crypto/openssl/meson.build | 11
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd.c | 32
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_ops.c | 32
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_private.h | 34
-rw-r--r--  drivers/crypto/qat/Makefile | 31
-rw-r--r--  drivers/crypto/qat/meson.build | 14
-rw-r--r--  drivers/crypto/qat/qat_adf/adf_transport_access_macros.h | 1
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 10
-rw-r--r--  drivers/crypto/qat/qat_crypto.c | 69
-rw-r--r--  drivers/crypto/qat/qat_crypto.h | 33
-rw-r--r--  drivers/crypto/qat/qat_crypto_capabilities.h | 33
-rw-r--r--  drivers/crypto/qat/qat_logs.h | 33
-rw-r--r--  drivers/crypto/qat/qat_qp.c | 43
-rw-r--r--  drivers/crypto/qat/rte_qat_cryptodev.c | 33
-rw-r--r--  drivers/crypto/scheduler/Makefile | 31
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.c | 40
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.h | 35
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h | 33
-rw-r--r--  drivers/crypto/scheduler/scheduler_failover.c | 32
-rw-r--r--  drivers/crypto/scheduler/scheduler_multicore.c | 32
-rw-r--r--  drivers/crypto/scheduler/scheduler_pkt_size_distr.c | 32
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd.c | 32
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_ops.c | 32
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_private.h | 33
-rw-r--r--  drivers/crypto/scheduler/scheduler_roundrobin.c | 32
-rw-r--r--  drivers/crypto/snow3g/Makefile | 31
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd.c | 32
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_ops.c | 32
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_private.h | 34
-rw-r--r--  drivers/crypto/zuc/Makefile | 31
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd.c | 32
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_ops.c | 32
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_private.h | 34
-rw-r--r--  drivers/event/Makefile | 33
-rw-r--r--  drivers/event/dpaa/Makefile | 38
-rw-r--r--  drivers/event/dpaa/dpaa_eventdev.c | 655
-rw-r--r--  drivers/event/dpaa/dpaa_eventdev.h | 81
-rw-r--r--  drivers/event/dpaa/rte_pmd_dpaa_event_version.map | 4
-rw-r--r--  drivers/event/dpaa2/Makefile | 31
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev.c | 154
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev.h | 41
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev_logs.h | 37
-rw-r--r--  drivers/event/dpaa2/dpaa2_hw_dpcon.c | 33
-rw-r--r--  drivers/event/meson.build | 7
-rw-r--r--  drivers/event/octeontx/Makefile | 39
-rw-r--r--  drivers/event/octeontx/meson.build | 9
-rw-r--r--  drivers/event/octeontx/rte_pmd_octeontx_event_version.map (renamed from drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map) | 0
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.c | 95
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.h | 59
-rw-r--r--  drivers/event/octeontx/ssovf_evdev_selftest.c | 1487
-rw-r--r--  drivers/event/octeontx/ssovf_worker.c | 32
-rw-r--r--  drivers/event/octeontx/ssovf_worker.h | 39
-rw-r--r--  drivers/event/opdl/Makefile | 39
-rw-r--r--  drivers/event/opdl/opdl_evdev.c | 769
-rw-r--r--  drivers/event/opdl/opdl_evdev.h | 314
-rw-r--r--  drivers/event/opdl/opdl_evdev_init.c | 940
-rw-r--r--  drivers/event/opdl/opdl_evdev_xstats.c | 180
-rw-r--r--  drivers/event/opdl/opdl_log.h | 21
-rw-r--r--  drivers/event/opdl/opdl_ring.c | 1233
-rw-r--r--  drivers/event/opdl/opdl_ring.h | 600
-rw-r--r--  drivers/event/opdl/opdl_test.c | 1057
-rw-r--r--  drivers/event/opdl/rte_pmd_evdev_opdl_version.map | 3
-rw-r--r--  drivers/event/skeleton/Makefile | 32
-rw-r--r--  drivers/event/skeleton/meson.build | 5
-rw-r--r--  drivers/event/skeleton/skeleton_eventdev.c | 33
-rw-r--r--  drivers/event/skeleton/skeleton_eventdev.h | 32
-rw-r--r--  drivers/event/sw/Makefile | 34
-rw-r--r--  drivers/event/sw/event_ring.h | 32
-rw-r--r--  drivers/event/sw/iq_chunk.h | 196
-rw-r--r--  drivers/event/sw/iq_ring.h | 172
-rw-r--r--  drivers/event/sw/meson.build | 11
-rw-r--r--  drivers/event/sw/sw_evdev.c | 184
-rw-r--r--  drivers/event/sw/sw_evdev.h | 71
-rw-r--r--  drivers/event/sw/sw_evdev_log.h | 23
-rw-r--r--  drivers/event/sw/sw_evdev_scheduler.c | 72
-rw-r--r--  drivers/event/sw/sw_evdev_selftest.c | 3245
-rw-r--r--  drivers/event/sw/sw_evdev_worker.c | 70
-rw-r--r--  drivers/event/sw/sw_evdev_xstats.c | 44
-rw-r--r--  drivers/mempool/Makefile | 31
-rw-r--r--  drivers/mempool/dpaa/Makefile | 31
-rw-r--r--  drivers/mempool/dpaa/dpaa_mempool.c | 105
-rw-r--r--  drivers/mempool/dpaa/dpaa_mempool.h | 43
-rw-r--r--  drivers/mempool/dpaa/rte_mempool_dpaa_version.map | 1
-rw-r--r--  drivers/mempool/dpaa2/Makefile | 30
-rw-r--r--  drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 34
-rw-r--r--  drivers/mempool/dpaa2/dpaa2_hw_mempool.h | 32
-rw-r--r--  drivers/mempool/meson.build | 7
-rw-r--r--  drivers/mempool/octeontx/Makefile | 31
-rw-r--r--  drivers/mempool/octeontx/meson.build | 10
-rw-r--r--  drivers/mempool/octeontx/octeontx_fpavf.c | 71
-rw-r--r--  drivers/mempool/octeontx/octeontx_fpavf.h | 38
-rw-r--r--  drivers/mempool/octeontx/octeontx_mbox.c | 36
-rw-r--r--  drivers/mempool/octeontx/octeontx_mbox.h | 32
-rw-r--r--  drivers/mempool/octeontx/octeontx_pool_logs.h | 73
-rw-r--r--  drivers/mempool/octeontx/octeontx_ssovf.c | 32
-rw-r--r--  drivers/mempool/octeontx/rte_mempool_octeontx.c | 105
-rw-r--r--  drivers/mempool/ring/Makefile | 31
-rw-r--r--  drivers/mempool/ring/meson.build | 4
-rw-r--r--  drivers/mempool/ring/rte_mempool_ring.c | 33
-rw-r--r--  drivers/mempool/stack/Makefile | 31
-rw-r--r--  drivers/mempool/stack/meson.build | 4
-rw-r--r--  drivers/mempool/stack/rte_mempool_stack.c | 33
-rw-r--r--  drivers/meson.build | 133
-rw-r--r--  drivers/net/Makefile | 34
-rw-r--r--  drivers/net/af_packet/meson.build | 7
-rw-r--r--  drivers/net/af_packet/rte_eth_af_packet.c | 46
-rw-r--r--  drivers/net/ark/ark_ethdev_rx.h | 2
-rw-r--r--  drivers/net/ark/ark_ethdev_tx.h | 2
-rw-r--r--  drivers/net/ark/ark_ext.h | 2
-rw-r--r--  drivers/net/ark/ark_global.h | 2
-rw-r--r--  drivers/net/ark/ark_pktchkr.c | 2
-rw-r--r--  drivers/net/ark/ark_pktgen.c | 2
-rw-r--r--  drivers/net/avf/Makefile | 54
-rw-r--r--  drivers/net/avf/avf.h | 216
-rw-r--r--  drivers/net/avf/avf_ethdev.c | 1430
-rw-r--r--  drivers/net/avf/avf_log.h | 44
-rw-r--r--  drivers/net/avf/avf_rxtx.c | 1959
-rw-r--r--  drivers/net/avf/avf_rxtx.h | 260
-rw-r--r--  drivers/net/avf/avf_rxtx_vec_common.h | 210
-rw-r--r--  drivers/net/avf/avf_rxtx_vec_sse.c | 656
-rw-r--r--  drivers/net/avf/avf_vchnl.c | 812
-rw-r--r--  drivers/net/avf/base/README | 19
-rw-r--r--  drivers/net/avf/base/avf_adminq.c | 1010
-rw-r--r--  drivers/net/avf/base/avf_adminq.h | 166
-rw-r--r--  drivers/net/avf/base/avf_adminq_cmd.h | 2842
-rw-r--r--  drivers/net/avf/base/avf_alloc.h | 65
-rw-r--r--  drivers/net/avf/base/avf_common.c | 1845
-rw-r--r--  drivers/net/avf/base/avf_devids.h | 43
-rw-r--r--  drivers/net/avf/base/avf_hmc.h | 245
-rw-r--r--  drivers/net/avf/base/avf_lan_hmc.h | 200
-rw-r--r--  drivers/net/avf/base/avf_osdep.h | 187
-rw-r--r--  drivers/net/avf/base/avf_prototype.h | 206
-rw-r--r--  drivers/net/avf/base/avf_register.h | 346
-rw-r--r--  drivers/net/avf/base/avf_status.h | 108
-rw-r--r--  drivers/net/avf/base/avf_type.h | 2024
-rw-r--r--  drivers/net/avf/base/virtchnl.h | 787
-rw-r--r--  drivers/net/avf/rte_pmd_avf_version.map | 4
-rw-r--r--  drivers/net/avp/avp_ethdev.c | 12
-rw-r--r--  drivers/net/avp/avp_logs.h | 9
-rw-r--r--  drivers/net/avp/rte_avp_common.h | 1
-rw-r--r--  drivers/net/bnx2x/bnx2x.c | 6
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.c | 15
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.h | 2
-rw-r--r--  drivers/net/bnx2x/bnx2x_logs.h | 17
-rw-r--r--  drivers/net/bnx2x/ecore_sp.c | 4
-rw-r--r--  drivers/net/bnxt/bnxt.h | 68
-rw-r--r--  drivers/net/bnxt/bnxt_cpr.c | 20
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c | 626
-rw-r--r--  drivers/net/bnxt/bnxt_filter.c | 89
-rw-r--r--  drivers/net/bnxt/bnxt_filter.h | 1
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c | 339
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.h | 14
-rw-r--r--  drivers/net/bnxt/bnxt_irq.c | 4
-rw-r--r--  drivers/net/bnxt/bnxt_ring.c | 19
-rw-r--r--  drivers/net/bnxt/bnxt_ring.h | 5
-rw-r--r--  drivers/net/bnxt/bnxt_rxq.c | 82
-rw-r--r--  drivers/net/bnxt/bnxt_rxq.h | 8
-rw-r--r--  drivers/net/bnxt/bnxt_rxr.c | 126
-rw-r--r--  drivers/net/bnxt/bnxt_rxr.h | 3
-rw-r--r--  drivers/net/bnxt/bnxt_stats.c | 25
-rw-r--r--  drivers/net/bnxt/bnxt_stats.h | 2
-rw-r--r--  drivers/net/bnxt/bnxt_txq.c | 15
-rw-r--r--  drivers/net/bnxt/bnxt_txq.h | 1
-rw-r--r--  drivers/net/bnxt/bnxt_txr.c | 53
-rw-r--r--  drivers/net/bnxt/bnxt_txr.h | 2
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.c | 14
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.h | 1
-rw-r--r--  drivers/net/bnxt/hsi_struct_def_dpdk.h | 226
-rw-r--r--  drivers/net/bnxt/rte_pmd_bnxt.c | 52
-rw-r--r--  drivers/net/bnxt/rte_pmd_bnxt.h | 2
-rw-r--r--  drivers/net/bonding/Makefile | 32
-rw-r--r--  drivers/net/bonding/meson.build | 11
-rw-r--r--  drivers/net/bonding/rte_eth_bond.h | 33
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad.c | 36
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad.h | 33
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad_private.h | 33
-rw-r--r--  drivers/net/bonding/rte_eth_bond_alb.c | 33
-rw-r--r--  drivers/net/bonding/rte_eth_bond_alb.h | 33
-rw-r--r--  drivers/net/bonding/rte_eth_bond_api.c | 95
-rw-r--r--  drivers/net/bonding/rte_eth_bond_args.c | 33
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c | 673
-rw-r--r--  drivers/net/bonding/rte_eth_bond_private.h | 60
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.c | 2
-rw-r--r--  drivers/net/cxgbe/cxgbe_ethdev.c | 2
-rw-r--r--  drivers/net/cxgbe/cxgbe_main.c | 2
-rw-r--r--  drivers/net/cxgbe/sge.c | 2
-rw-r--r--  drivers/net/dpaa/Makefile | 35
-rw-r--r--  drivers/net/dpaa/dpaa_ethdev.c | 462
-rw-r--r--  drivers/net/dpaa/dpaa_ethdev.h | 82
-rw-r--r--  drivers/net/dpaa/dpaa_rxtx.c | 367
-rw-r--r--  drivers/net/dpaa/dpaa_rxtx.h | 36
-rw-r--r--  drivers/net/dpaa/rte_pmd_dpaa.h | 39
-rw-r--r--  drivers/net/dpaa/rte_pmd_dpaa_version.map | 10
-rw-r--r--  drivers/net/dpaa2/Makefile | 33
-rw-r--r--  drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 32
-rw-r--r--  drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h | 52
-rw-r--r--  drivers/net/dpaa2/dpaa2_ethdev.c | 144
-rw-r--r--  drivers/net/dpaa2/dpaa2_ethdev.h | 62
-rw-r--r--  drivers/net/dpaa2/dpaa2_rxtx.c | 364
-rw-r--r--  drivers/net/dpaa2/mc/dpkg.c | 35
-rw-r--r--  drivers/net/dpaa2/mc/dpni.c | 39
-rw-r--r--  drivers/net/dpaa2/mc/fsl_dpkg.h | 38
-rw-r--r--  drivers/net/dpaa2/mc/fsl_dpni.h | 37
-rw-r--r--  drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 37
-rw-r--r--  drivers/net/dpaa2/mc/fsl_net.h | 35
-rw-r--r--  drivers/net/e1000/Makefile | 32
-rw-r--r--  drivers/net/e1000/base/e1000_82575.c | 2
-rw-r--r--  drivers/net/e1000/base/e1000_ich8lan.c | 2
-rw-r--r--  drivers/net/e1000/base/meson.build | 37
-rw-r--r--  drivers/net/e1000/e1000_ethdev.h | 53
-rw-r--r--  drivers/net/e1000/e1000_logs.h | 49
-rw-r--r--  drivers/net/e1000/em_ethdev.c | 100
-rw-r--r--  drivers/net/e1000/em_rxtx.c | 35
-rw-r--r--  drivers/net/e1000/igb_ethdev.c | 118
-rw-r--r--  drivers/net/e1000/igb_flow.c | 215
-rw-r--r--  drivers/net/e1000/igb_pf.c | 35
-rw-r--r--  drivers/net/e1000/igb_regs.h | 33
-rw-r--r--  drivers/net/e1000/igb_rxtx.c | 96
-rw-r--r--  drivers/net/e1000/meson.build | 16
-rw-r--r--  drivers/net/ena/base/ena_plat_dpdk.h | 2
-rw-r--r--  drivers/net/ena/ena_ethdev.c | 139
-rw-r--r--  drivers/net/ena/ena_ethdev.h | 5
-rw-r--r--  drivers/net/ena/ena_logs.h | 14
-rw-r--r--  drivers/net/ena/ena_platform.h | 2
-rw-r--r--  drivers/net/enic/LICENSE | 27
-rw-r--r--  drivers/net/enic/Makefile | 32
-rw-r--r--  drivers/net/enic/base/cq_desc.h | 33
-rw-r--r--  drivers/net/enic/base/cq_enet_desc.h | 33
-rw-r--r--  drivers/net/enic/base/rq_enet_desc.h | 33
-rw-r--r--  drivers/net/enic/base/vnic_cq.c | 33
-rw-r--r--  drivers/net/enic/base/vnic_cq.h | 33
-rw-r--r--  drivers/net/enic/base/vnic_dev.c | 44
-rw-r--r--  drivers/net/enic/base/vnic_dev.h | 35
-rw-r--r--  drivers/net/enic/base/vnic_devcmd.h | 33
-rw-r--r--  drivers/net/enic/base/vnic_enet.h | 33
-rw-r--r--  drivers/net/enic/base/vnic_intr.c | 33
-rw-r--r--  drivers/net/enic/base/vnic_intr.h | 33
-rw-r--r--  drivers/net/enic/base/vnic_nic.h | 33
-rw-r--r--  drivers/net/enic/base/vnic_resource.h | 33
-rw-r--r--  drivers/net/enic/base/vnic_rq.c | 33
-rw-r--r--  drivers/net/enic/base/vnic_rq.h | 33
-rw-r--r--  drivers/net/enic/base/vnic_rss.c | 33
-rw-r--r--  drivers/net/enic/base/vnic_rss.h | 32
-rw-r--r--  drivers/net/enic/base/vnic_stats.h | 33
-rw-r--r--  drivers/net/enic/base/vnic_wq.c | 33
-rw-r--r--  drivers/net/enic/base/vnic_wq.h | 33
-rw-r--r--  drivers/net/enic/base/wq_enet_desc.h | 33
-rw-r--r--  drivers/net/enic/enic.h | 61
-rw-r--r--  drivers/net/enic/enic_clsf.c | 35
-rw-r--r--  drivers/net/enic/enic_compat.h | 35
-rw-r--r--  drivers/net/enic/enic_ethdev.c | 95
-rw-r--r--  drivers/net/enic/enic_flow.c | 44
-rw-r--r--  drivers/net/enic/enic_main.c | 91
-rw-r--r--  drivers/net/enic/enic_res.c | 35
-rw-r--r--  drivers/net/enic/enic_res.h | 33
-rw-r--r--  drivers/net/enic/enic_rxtx.c | 175
-rw-r--r--  drivers/net/failsafe/Makefile | 8
-rw-r--r--  drivers/net/failsafe/failsafe.c | 107
-rw-r--r--  drivers/net/failsafe/failsafe_args.c | 118
-rw-r--r--  drivers/net/failsafe/failsafe_eal.c | 126
-rw-r--r--  drivers/net/failsafe/failsafe_ether.c | 45
-rw-r--r--  drivers/net/failsafe/failsafe_flow.c | 70
-rw-r--r--  drivers/net/failsafe/failsafe_intr.c | 536
-rw-r--r--  drivers/net/failsafe/failsafe_ops.c | 422
-rw-r--r--  drivers/net/failsafe/failsafe_private.h | 168
-rw-r--r--  drivers/net/failsafe/failsafe_rxtx.c | 87
-rw-r--r--  drivers/net/fm10k/Makefile | 32
-rw-r--r--  drivers/net/fm10k/base/fm10k_mbx.c | 2
-rw-r--r--  drivers/net/fm10k/base/meson.build | 27
-rw-r--r--  drivers/net/fm10k/fm10k.h | 33
-rw-r--r--  drivers/net/fm10k/fm10k_ethdev.c | 67
-rw-r--r--  drivers/net/fm10k/fm10k_logs.h | 49
-rw-r--r--  drivers/net/fm10k/fm10k_rxtx.c | 35
-rw-r--r--  drivers/net/fm10k/fm10k_rxtx_vec.c | 35
-rw-r--r--  drivers/net/fm10k/meson.build | 16
-rw-r--r--  drivers/net/i40e/Makefile | 51
-rw-r--r--  drivers/net/i40e/base/README | 2
-rw-r--r--  drivers/net/i40e/base/i40e_adminq.c | 44
-rw-r--r--  drivers/net/i40e/base/i40e_adminq.h | 3
-rw-r--r--  drivers/net/i40e/base/i40e_adminq_cmd.h | 26
-rw-r--r--  drivers/net/i40e/base/i40e_common.c | 376
-rw-r--r--  drivers/net/i40e/base/i40e_dcb.c | 88
-rw-r--r--  drivers/net/i40e/base/i40e_devids.h | 3
-rw-r--r--  drivers/net/i40e/base/i40e_hmc.c | 1
-rw-r--r--  drivers/net/i40e/base/i40e_nvm.c | 447
-rw-r--r--  drivers/net/i40e/base/i40e_prototype.h | 44
-rw-r--r--  drivers/net/i40e/base/i40e_status.h | 1
-rw-r--r--  drivers/net/i40e/base/i40e_type.h | 56
-rw-r--r--  drivers/net/i40e/base/meson.build | 28
-rw-r--r--  drivers/net/i40e/base/virtchnl.h | 12
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c | 1093
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h | 125
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c | 78
-rw-r--r--  drivers/net/i40e/i40e_fdir.c | 129
-rw-r--r--  drivers/net/i40e/i40e_flow.c | 559
-rw-r--r--  drivers/net/i40e/i40e_logs.h | 33
-rw-r--r--  drivers/net/i40e/i40e_pf.c | 178
-rw-r--r--  drivers/net/i40e/i40e_pf.h | 39
-rw-r--r--  drivers/net/i40e/i40e_regs.h | 33
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c | 106
-rw-r--r--  drivers/net/i40e/i40e_rxtx.h | 39
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_altivec.c | 2
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_avx2.c | 792
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_common.h | 35
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_neon.c | 2
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_sse.c | 35
-rw-r--r--  drivers/net/i40e/i40e_tm.c | 33
-rw-r--r--  drivers/net/i40e/meson.build | 46
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.c | 247
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.h | 174
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e_version.map | 7
-rw-r--r--  drivers/net/ixgbe/Makefile | 33
-rw-r--r--  drivers/net/ixgbe/base/README | 2
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82598.c | 3
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82599.c | 31
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_api.c | 18
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_common.c | 183
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_common.h | 6
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_dcb.c | 10
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_dcb_82598.c | 22
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_dcb_82599.c | 25
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_hv_vf.c | 17
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_mbx.c | 22
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_phy.c | 19
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_type.h | 61
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_vf.c | 7
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x540.c | 27
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x550.c | 152
-rw-r--r--  drivers/net/ixgbe/base/meson.build | 32
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599_bypass.c | 33
-rw-r--r--  drivers/net/ixgbe/ixgbe_bypass.c | 35
-rw-r--r--  drivers/net/ixgbe/ixgbe_bypass.h | 33
-rw-r--r--  drivers/net/ixgbe/ixgbe_bypass_api.h | 33
-rw-r--r--  drivers/net/ixgbe/ixgbe_bypass_defines.h | 33
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c | 125
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.h | 43
-rw-r--r--  drivers/net/ixgbe/ixgbe_fdir.c | 60
-rw-r--r--  drivers/net/ixgbe/ixgbe_flow.c | 357
-rw-r--r--  drivers/net/ixgbe/ixgbe_ipsec.c | 132
-rw-r--r--  drivers/net/ixgbe/ixgbe_ipsec.h | 40
-rw-r--r--  drivers/net/ixgbe/ixgbe_logs.h | 49
-rw-r--r--  drivers/net/ixgbe/ixgbe_pf.c | 41
-rw-r--r--  drivers/net/ixgbe/ixgbe_regs.h | 33
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c | 102
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.h | 33
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 35
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 35
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c | 35
-rw-r--r--  drivers/net/ixgbe/ixgbe_tm.c | 33
-rw-r--r--  drivers/net/ixgbe/meson.build | 32
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.c | 35
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.h | 34
-rw-r--r--  drivers/net/kni/Makefile | 31
-rw-r--r--  drivers/net/kni/rte_eth_kni.c | 37
-rw-r--r--  drivers/net/liquidio/Makefile | 33
-rw-r--r--  drivers/net/liquidio/base/lio_23xx_reg.h | 33
-rw-r--r--  drivers/net/liquidio/base/lio_23xx_vf.c | 89
-rw-r--r--  drivers/net/liquidio/base/lio_23xx_vf.h | 38
-rw-r--r--  drivers/net/liquidio/base/lio_hw_defs.h | 35
-rw-r--r--  drivers/net/liquidio/base/lio_mbox.c | 35
-rw-r--r--  drivers/net/liquidio/base/lio_mbox.h | 33
-rw-r--r--  drivers/net/liquidio/lio_ethdev.c | 206
-rw-r--r--  drivers/net/liquidio/lio_ethdev.h | 33
-rw-r--r--  drivers/net/liquidio/lio_logs.h | 53
-rw-r--r--  drivers/net/liquidio/lio_rxtx.c | 116
-rw-r--r--  drivers/net/liquidio/lio_rxtx.h | 33
-rw-r--r--  drivers/net/liquidio/lio_struct.h | 39
-rw-r--r--  drivers/net/meson.build | 12
-rw-r--r--  drivers/net/mlx4/Makefile | 38
-rw-r--r--  drivers/net/mlx4/mlx4.c | 214
-rw-r--r--  drivers/net/mlx4/mlx4.h | 49
-rw-r--r--  drivers/net/mlx4/mlx4_ethdev.c | 73
-rw-r--r--  drivers/net/mlx4/mlx4_flow.c | 127
-rw-r--r--  drivers/net/mlx4/mlx4_flow.h | 37
-rw-r--r--  drivers/net/mlx4/mlx4_glue.c | 279
-rw-r--r--  drivers/net/mlx4/mlx4_glue.h | 89
-rw-r--r--  drivers/net/mlx4/mlx4_intr.c | 91
-rw-r--r--  drivers/net/mlx4/mlx4_mr.c | 41
-rw-r--r--  drivers/net/mlx4/mlx4_prm.h | 59
-rw-r--r--  drivers/net/mlx4/mlx4_rxq.c | 167
-rw-r--r--  drivers/net/mlx4/mlx4_rxtx.c | 532
-rw-r--r--  drivers/net/mlx4/mlx4_rxtx.h | 44
-rw-r--r--  drivers/net/mlx4/mlx4_txq.c | 160
-rw-r--r--  drivers/net/mlx4/mlx4_utils.c | 34
-rw-r--r--  drivers/net/mlx4/mlx4_utils.h | 40
-rw-r--r--  drivers/net/mlx5/Makefile | 34
-rw-r--r--  drivers/net/mlx5/mlx5.c | 531
-rw-r--r--  drivers/net/mlx5/mlx5.h | 134
-rw-r--r--  drivers/net/mlx5/mlx5_defs.h | 52
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c | 386
-rw-r--r--  drivers/net/mlx5/mlx5_flow.c | 389
-rw-r--r--  drivers/net/mlx5/mlx5_glue.c | 353
-rw-r--r--  drivers/net/mlx5/mlx5_glue.h | 113
-rw-r--r--  drivers/net/mlx5/mlx5_mac.c | 46
-rw-r--r--  drivers/net/mlx5/mlx5_mr.c | 57
-rw-r--r--  drivers/net/mlx5/mlx5_prm.h | 34
-rw-r--r--  drivers/net/mlx5/mlx5_rss.c | 42
-rw-r--r--  drivers/net/mlx5/mlx5_rxmode.c | 44
-rw-r--r--  drivers/net/mlx5/mlx5_rxq.c | 231
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c | 470
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.h | 156
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.c | 126
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.h | 48
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 135
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 77
-rw-r--r--  drivers/net/mlx5/mlx5_socket.c | 33
-rw-r--r--  drivers/net/mlx5/mlx5_stats.c | 68
-rw-r--r--  drivers/net/mlx5/mlx5_trigger.c | 86
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c | 380
-rw-r--r--  drivers/net/mlx5/mlx5_utils.h | 34
-rw-r--r--  drivers/net/mlx5/mlx5_vlan.c | 61
-rw-r--r--  drivers/net/mrvl/Makefile | 4
-rw-r--r--  drivers/net/mrvl/mrvl_ethdev.c | 565
-rw-r--r--  drivers/net/mrvl/mrvl_ethdev.h | 8
-rw-r--r--  drivers/net/mrvl/mrvl_qos.c | 2
-rw-r--r--  drivers/net/nfp/Makefile | 32
-rw-r--r--  drivers/net/nfp/nfp_net.c | 191
-rw-r--r--  drivers/net/nfp/nfp_net_logs.h | 24
-rw-r--r--  drivers/net/null/meson.build | 4
-rw-r--r--  drivers/net/null/rte_eth_null.c | 17
-rw-r--r--  drivers/net/octeontx/Makefile | 34
-rw-r--r--  drivers/net/octeontx/base/meson.build | 21
-rw-r--r--  drivers/net/octeontx/base/octeontx_bgx.c | 32
-rw-r--r--  drivers/net/octeontx/base/octeontx_bgx.h | 32
-rw-r--r--  drivers/net/octeontx/base/octeontx_io.h | 32
-rw-r--r--  drivers/net/octeontx/base/octeontx_pki_var.h | 32
-rw-r--r--  drivers/net/octeontx/base/octeontx_pkivf.c | 33
-rw-r--r--  drivers/net/octeontx/base/octeontx_pkivf.h | 32
-rw-r--r--  drivers/net/octeontx/base/octeontx_pkovf.c | 33
-rw-r--r--  drivers/net/octeontx/base/octeontx_pkovf.h | 32
-rw-r--r--  drivers/net/octeontx/meson.build | 15
-rw-r--r--  drivers/net/octeontx/octeontx_ethdev.c | 78
-rw-r--r--  drivers/net/octeontx/octeontx_ethdev.h | 41
-rw-r--r--  drivers/net/octeontx/octeontx_logs.h | 75
-rw-r--r--  drivers/net/octeontx/octeontx_rxtx.c | 34
-rw-r--r--  drivers/net/octeontx/octeontx_rxtx.h | 34
-rw-r--r--  drivers/net/octeontx/rte_pmd_octeontx_version.map | 7
-rw-r--r--  drivers/net/pcap/Makefile | 35
-rw-r--r--  drivers/net/pcap/meson.build | 22
-rw-r--r--  drivers/net/pcap/rte_eth_pcap.c | 44
-rw-r--r--  drivers/net/qede/base/ecore_chain.h | 2
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.c | 7
-rw-r--r--  drivers/net/qede/base/ecore_dev.c | 2
-rw-r--r--  drivers/net/qede/base/ecore_mcp_api.h | 2
-rw-r--r--  drivers/net/qede/base/ecore_proto_if.h | 2
-rw-r--r--  drivers/net/qede/base/ecore_vf.c | 6
-rw-r--r--  drivers/net/qede/base/ecore_vfpf_if.h | 2
-rw-r--r--  drivers/net/qede/qede_ethdev.c | 881
-rw-r--r--  drivers/net/qede/qede_ethdev.h | 18
-rw-r--r--  drivers/net/qede/qede_logs.h | 57
-rw-r--r--  drivers/net/qede/qede_rxtx.c | 199
-rw-r--r--  drivers/net/qede/qede_rxtx.h | 6
-rw-r--r--  drivers/net/ring/Makefile | 32
-rw-r--r--  drivers/net/ring/meson.build | 5
-rw-r--r--  drivers/net/ring/rte_eth_ring.c | 37
-rw-r--r--  drivers/net/ring/rte_eth_ring.h | 33
-rw-r--r--  drivers/net/sfc/Makefile | 30
-rw-r--r--  drivers/net/sfc/base/README | 24
-rw-r--r--  drivers/net/sfc/base/ef10_ev.c | 71
-rw-r--r--  drivers/net/sfc/base/ef10_filter.c | 40
-rw-r--r--  drivers/net/sfc/base/ef10_impl.h | 117
-rw-r--r--  drivers/net/sfc/base/ef10_intr.c | 30
-rw-r--r--  drivers/net/sfc/base/ef10_mac.c | 30
-rw-r--r--  drivers/net/sfc/base/ef10_mcdi.c | 32
-rw-r--r--  drivers/net/sfc/base/ef10_nic.c | 44
-rw-r--r--  drivers/net/sfc/base/ef10_nvram.c | 202
-rw-r--r--  drivers/net/sfc/base/ef10_phy.c | 32
-rw-r--r--  drivers/net/sfc/base/ef10_rx.c | 231
-rw-r--r--  drivers/net/sfc/base/ef10_tlv_layout.h | 15
-rw-r--r--  drivers/net/sfc/base/ef10_tx.c | 165
-rw-r--r--  drivers/net/sfc/base/ef10_vpd.c | 30
-rw-r--r--  drivers/net/sfc/base/efx.h | 347
-rw-r--r--  drivers/net/sfc/base/efx_bootcfg.c | 46
-rw-r--r--  drivers/net/sfc/base/efx_check.h | 45
-rw-r--r--  drivers/net/sfc/base/efx_crc32.c | 30
-rw-r--r--  drivers/net/sfc/base/efx_ev.c | 51
-rw-r--r--  drivers/net/sfc/base/efx_filter.c | 30
-rw-r--r--  drivers/net/sfc/base/efx_hash.c | 30
-rw-r--r--  drivers/net/sfc/base/efx_impl.h | 117
-rw-r--r--  drivers/net/sfc/base/efx_intr.c | 30
-rw-r--r--  drivers/net/sfc/base/efx_lic.c | 228
-rw-r--r--  drivers/net/sfc/base/efx_mac.c | 49
-rw-r--r--  drivers/net/sfc/base/efx_mcdi.c | 38
-rw-r--r--  drivers/net/sfc/base/efx_mcdi.h | 36
-rw-r--r--  drivers/net/sfc/base/efx_mon.c | 34
-rw-r--r--  drivers/net/sfc/base/efx_nic.c | 244
-rw-r--r--  drivers/net/sfc/base/efx_nvram.c | 192
-rw-r--r--  drivers/net/sfc/base/efx_phy.c | 32
-rw-r--r--  drivers/net/sfc/base/efx_phy_ids.h | 30
-rw-r--r--  drivers/net/sfc/base/efx_port.c | 31
-rw-r--r--  drivers/net/sfc/base/efx_regs.h | 30
-rw-r--r--  drivers/net/sfc/base/efx_regs_ef10.h | 30
-rw-r--r--  drivers/net/sfc/base/efx_regs_mcdi.h | 25
-rw-r--r--  drivers/net/sfc/base/efx_regs_pci.h | 30
-rw-r--r--  drivers/net/sfc/base/efx_rx.c | 181
-rw-r--r--  drivers/net/sfc/base/efx_sram.c | 30
-rw-r--r--  drivers/net/sfc/base/efx_tunnel.c | 463
-rw-r--r--  drivers/net/sfc/base/efx_tx.c | 145
-rw-r--r--  drivers/net/sfc/base/efx_types.h | 33
-rw-r--r--  drivers/net/sfc/base/efx_vpd.c | 32
-rw-r--r--  drivers/net/sfc/base/hunt_impl.h | 30
-rw-r--r--  drivers/net/sfc/base/hunt_nic.c | 30
-rw-r--r--  drivers/net/sfc/base/mcdi_mon.c | 56
-rw-r--r--  drivers/net/sfc/base/mcdi_mon.h | 30
-rw-r--r--  drivers/net/sfc/base/medford_impl.h | 38
-rw-r--r--  drivers/net/sfc/base/medford_nic.c | 33
-rw-r--r--  drivers/net/sfc/base/meson.build | 74
-rw-r--r--  drivers/net/sfc/base/siena_flash.h | 42
-rw-r--r--  drivers/net/sfc/base/siena_impl.h | 54
-rw-r--r--  drivers/net/sfc/base/siena_mac.c | 30
-rw-r--r--  drivers/net/sfc/base/siena_mcdi.c | 30
-rw-r--r--  drivers/net/sfc/base/siena_nic.c | 255
-rw-r--r--  drivers/net/sfc/base/siena_nvram.c | 43
-rw-r--r--  drivers/net/sfc/base/siena_phy.c | 32
-rw-r--r--  drivers/net/sfc/base/siena_sram.c | 30
-rw-r--r--  drivers/net/sfc/base/siena_vpd.c | 34
-rw-r--r--  drivers/net/sfc/efsys.h | 28
-rw-r--r--  drivers/net/sfc/meson.build | 64
-rw-r--r--  drivers/net/sfc/rte_pmd_sfc_version.map (renamed from drivers/net/sfc/rte_pmd_sfc_efx_version.map) | 0
-rw-r--r--  drivers/net/sfc/sfc.c | 171
-rw-r--r--  drivers/net/sfc/sfc.h | 56
-rw-r--r--  drivers/net/sfc/sfc_debug.h | 28
-rw-r--r--  drivers/net/sfc/sfc_dp.c | 26
-rw-r--r--  drivers/net/sfc/sfc_dp.h | 26
-rw-r--r--  drivers/net/sfc/sfc_dp_rx.h | 59
-rw-r--r--  drivers/net/sfc/sfc_dp_tx.h | 57
-rw-r--r--  drivers/net/sfc/sfc_ef10.h | 26
-rw-r--r--  drivers/net/sfc/sfc_ef10_rx.c | 196
-rw-r--r--  drivers/net/sfc/sfc_ef10_tx.c | 84
-rw-r--r--  drivers/net/sfc/sfc_ethdev.c | 231
-rw-r--r--  drivers/net/sfc/sfc_ev.c | 53
-rw-r--r--  drivers/net/sfc/sfc_ev.h | 28
-rw-r--r--  drivers/net/sfc/sfc_filter.c | 26
-rw-r--r--  drivers/net/sfc/sfc_filter.h | 26
-rw-r--r--  drivers/net/sfc/sfc_flow.c | 34
-rw-r--r--  drivers/net/sfc/sfc_flow.h | 26
-rw-r--r--  drivers/net/sfc/sfc_intr.c | 35
-rw-r--r--  drivers/net/sfc/sfc_kvargs.c | 26
-rw-r--r--  drivers/net/sfc/sfc_kvargs.h | 26
-rw-r--r--  drivers/net/sfc/sfc_log.h | 26
-rw-r--r--  drivers/net/sfc/sfc_mcdi.c | 28
-rw-r--r--  drivers/net/sfc/sfc_port.c | 50
-rw-r--r--  drivers/net/sfc/sfc_rx.c | 218
-rw-r--r--  drivers/net/sfc/sfc_rx.h | 33
-rw-r--r--  drivers/net/sfc/sfc_tso.c | 26
-rw-r--r--  drivers/net/sfc/sfc_tweak.h | 26
-rw-r--r--  drivers/net/sfc/sfc_tx.c | 260
-rw-r--r--  drivers/net/sfc/sfc_tx.h | 33
-rw-r--r--  drivers/net/softnic/Makefile | 32
-rw-r--r--  drivers/net/softnic/rte_eth_softnic.c | 37
-rw-r--r--  drivers/net/softnic/rte_eth_softnic.h | 33
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_internals.h | 35
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_tm.c | 33
-rw-r--r--  drivers/net/szedata2/rte_eth_szedata2.c | 6
-rw-r--r--  drivers/net/tap/Makefile | 61
-rw-r--r--  drivers/net/tap/rte_eth_tap.c | 265
-rw-r--r--  drivers/net/tap/rte_eth_tap.h | 50
-rw-r--r--  drivers/net/tap/tap_bpf.h | 117
-rw-r--r--  drivers/net/tap/tap_bpf_api.c | 190
-rw-r--r--  drivers/net/tap/tap_bpf_insns.h | 1696
-rw-r--r--  drivers/net/tap/tap_bpf_program.c | 224
-rw-r--r--  drivers/net/tap/tap_flow.c | 825
-rw-r--r--  drivers/net/tap/tap_flow.h | 47
-rw-r--r--  drivers/net/tap/tap_intr.c | 110
-rw-r--r--  drivers/net/tap/tap_netlink.c | 75
-rw-r--r--  drivers/net/tap/tap_netlink.h | 59
-rw-r--r--  drivers/net/tap/tap_rss.h | 34
-rw-r--r--  drivers/net/tap/tap_tcmsgs.c | 62
-rw-r--r--  drivers/net/tap/tap_tcmsgs.h | 38
-rw-r--r--  drivers/net/thunderx/Makefile | 34
-rw-r--r--  drivers/net/thunderx/base/meson.build | 15
-rw-r--r--  drivers/net/thunderx/base/nicvf_bsvf.c | 32
-rw-r--r--  drivers/net/thunderx/base/nicvf_bsvf.h | 32
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw.c | 32
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw.h | 32
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw_defs.h | 32
-rw-r--r--  drivers/net/thunderx/base/nicvf_mbox.c | 32
-rw-r--r--  drivers/net/thunderx/base/nicvf_mbox.h | 32
-rw-r--r--  drivers/net/thunderx/base/nicvf_plat.h | 32
-rw-r--r--  drivers/net/thunderx/meson.build | 20
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.c | 195
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.h | 48
-rw-r--r--  drivers/net/thunderx/nicvf_logs.h | 75
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.c | 36
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.h | 34
-rw-r--r--  drivers/net/thunderx/nicvf_struct.h | 36
-rw-r--r--  drivers/net/thunderx/nicvf_svf.c | 32
-rw-r--r--  drivers/net/thunderx/nicvf_svf.h | 32
-rw-r--r--  drivers/net/thunderx/rte_pmd_thunderx_version.map (renamed from drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map) | 0
-rw-r--r--  drivers/net/vdev_netvsc/Makefile | 32
-rw-r--r--  drivers/net/vdev_netvsc/rte_pmd_vdev_netvsc_version.map | 4
-rw-r--r--  drivers/net/vdev_netvsc/vdev_netvsc.c | 752
-rw-r--r--  drivers/net/vhost/Makefile | 32
-rw-r--r--  drivers/net/vhost/rte_eth_vhost.c | 15
-rw-r--r--  drivers/net/virtio/Makefile | 33
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c | 251
-rw-r--r--  drivers/net/virtio/virtio_ethdev.h | 44
-rw-r--r--  drivers/net/virtio/virtio_logs.h | 52
-rw-r--r--  drivers/net/virtio/virtio_pci.c | 33
-rw-r--r--  drivers/net/virtio/virtio_pci.h | 42
-rw-r--r--  drivers/net/virtio/virtio_ring.h | 33
-rw-r--r--  drivers/net/virtio/virtio_rxtx.c | 76
-rw-r--r--  drivers/net/virtio/virtio_rxtx.h | 37
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.c | 67
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.h | 35
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple_neon.c | 38
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple_sse.c | 37
-rw-r--r--  drivers/net/virtio/virtio_user/vhost.h | 33
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel.c | 33
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel_tap.c | 33
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel_tap.h | 33
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_user.c | 33
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.c | 43
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.h | 33
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c | 39
-rw-r--r--  drivers/net/virtio/virtqueue.c | 94
-rw-r--r--  drivers/net/virtio/virtqueue.h | 61
-rw-r--r--  drivers/net/vmxnet3/Makefile | 32
-rw-r--r--  drivers/net/vmxnet3/base/upt1_defs.h | 22
-rw-r--r--  drivers/net/vmxnet3/base/vmware_pack_begin.h | 33
-rw-r--r--  drivers/net/vmxnet3/base/vmware_pack_end.h | 33
-rw-r--r--  drivers/net/vmxnet3/base/vmxnet3_defs.h | 22
-rw-r--r--  drivers/net/vmxnet3/base/vmxnet3_osdep.h | 33
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c | 55
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.h | 33
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_logs.h | 50
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ring.h | 33
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c | 37
-rw-r--r--  drivers/raw/Makefile | 9
-rw-r--r--  drivers/raw/skeleton_rawdev/Makefile | 29
-rw-r--r--  drivers/raw/skeleton_rawdev/rte_pmd_skeleton_rawdev_version.map | 4
-rw-r--r--  drivers/raw/skeleton_rawdev/skeleton_rawdev.c | 755
-rw-r--r--  drivers/raw/skeleton_rawdev/skeleton_rawdev.h | 136
-rw-r--r--  drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c | 450
793 files changed, 57437 insertions, 24273 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index db0cd76e..ee65c87b 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2015 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
@@ -36,9 +8,13 @@ DIRS-y += mempool
DEPDIRS-mempool := bus
DIRS-y += net
DEPDIRS-net := bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += bbdev
+DEPDIRS-bbdev := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
DEPDIRS-crypto := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
DEPDIRS-event := bus mempool net
+DIRS-$(CONFIG_RTE_LIBRTE_RAWDEV) += raw
+DEPDIRS-raw := bus mempool net event
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/bbdev/Makefile b/drivers/bbdev/Makefile
new file mode 100644
index 00000000..4ec83b0a
--- /dev/null
+++ b/drivers/bbdev/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+core-libs := librte_eal librte_mbuf librte_mempool librte_ring
+core-libs += librte_bbdev librte_kvargs librte_cfgfile
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL) += null
+DEPDIRS-null = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += turbo_sw
+DEPDIRS-turbo_sw = $(core-libs)
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/bbdev/null/Makefile b/drivers/bbdev/null/Makefile
new file mode 100644
index 00000000..f885a97b
--- /dev/null
+++ b/drivers/bbdev/null/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+# library name
+LIB = librte_pmd_bbdev_null.a
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lrte_kvargs
+LDLIBS += -lrte_bbdev
+LDLIBS += -lrte_bus_vdev
+
+# versioning export map
+EXPORT_MAP := rte_pmd_bbdev_null_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL) += bbdev_null.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bbdev/null/bbdev_null.c b/drivers/bbdev/null/bbdev_null.c
new file mode 100644
index 00000000..6bc84917
--- /dev/null
+++ b/drivers/bbdev/null/bbdev_null.c
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_kvargs.h>
+
+#include <rte_bbdev.h>
+#include <rte_bbdev_pmd.h>
+
+#define DRIVER_NAME bbdev_null
+
+/* NULL BBDev logging ID */
+static int bbdev_null_logtype;
+
+/* Helper macro for logging */
+#define rte_bbdev_log(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, bbdev_null_logtype, fmt "\n", ##__VA_ARGS__)
+
+#define rte_bbdev_log_debug(fmt, ...) \
+ rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
+ ##__VA_ARGS__)
+
+/* Initialisation params structure that can be used by null BBDEV driver */
+struct bbdev_null_params {
+ int socket_id; /*< Null BBDEV socket */
+ uint16_t queues_num; /*< Null BBDEV queues number */
+};
+
+/* Acceptable params for null BBDEV devices */
+#define BBDEV_NULL_MAX_NB_QUEUES_ARG "max_nb_queues"
+#define BBDEV_NULL_SOCKET_ID_ARG "socket_id"
+
+static const char * const bbdev_null_valid_params[] = {
+ BBDEV_NULL_MAX_NB_QUEUES_ARG,
+ BBDEV_NULL_SOCKET_ID_ARG
+};
+
+/* private data structure */
+struct bbdev_private {
+ unsigned int max_nb_queues; /**< Max number of queues */
+};
+
+/* queue */
+struct bbdev_queue {
+ struct rte_ring *processed_pkts; /* Ring for processed packets */
+} __rte_cache_aligned;
+
+/* Get device info */
+static void
+info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
+{
+ struct bbdev_private *internals = dev->data->dev_private;
+
+ static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
+ RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
+ };
+
+ static struct rte_bbdev_queue_conf default_queue_conf = {
+ .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
+ };
+
+ default_queue_conf.socket = dev->data->socket_id;
+
+ dev_info->driver_name = RTE_STR(DRIVER_NAME);
+ dev_info->max_num_queues = internals->max_nb_queues;
+ dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
+ dev_info->hardware_accelerated = false;
+ dev_info->max_queue_priority = 0;
+ dev_info->default_queue_conf = default_queue_conf;
+ dev_info->capabilities = bbdev_capabilities;
+ dev_info->cpu_flag_reqs = NULL;
+ dev_info->min_alignment = 0;
+
+ rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
+}
+
+/* Release queue */
+static int
+q_release(struct rte_bbdev *dev, uint16_t q_id)
+{
+ struct bbdev_queue *q = dev->data->queues[q_id].queue_private;
+
+ if (q != NULL) {
+ rte_ring_free(q->processed_pkts);
+ rte_free(q);
+ dev->data->queues[q_id].queue_private = NULL;
+ }
+
+ rte_bbdev_log_debug("released device queue %u:%u",
+ dev->data->dev_id, q_id);
+ return 0;
+}
+
+/* Setup a queue */
+static int
+q_setup(struct rte_bbdev *dev, uint16_t q_id,
+ const struct rte_bbdev_queue_conf *queue_conf)
+{
+ struct bbdev_queue *q;
+ char ring_name[RTE_RING_NAMESIZE];
+ snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u",
+ dev->data->dev_id, q_id);
+
+ /* Allocate the queue data structure. */
+ q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q == NULL) {
+ rte_bbdev_log(ERR, "Failed to allocate queue memory");
+ return -ENOMEM;
+ }
+
+ q->processed_pkts = rte_ring_create(ring_name, queue_conf->queue_size,
+ queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (q->processed_pkts == NULL) {
+ rte_bbdev_log(ERR, "Failed to create ring");
+ goto free_q;
+ }
+
+ dev->data->queues[q_id].queue_private = q;
+ rte_bbdev_log_debug("setup device queue %s", ring_name);
+ return 0;
+
+free_q:
+ rte_free(q);
+ return -EFAULT;
+}
+
+static const struct rte_bbdev_ops pmd_ops = {
+ .info_get = info_get,
+ .queue_setup = q_setup,
+ .queue_release = q_release
+};
+
+/* Enqueue decode burst */
+static uint16_t
+enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+ struct bbdev_queue *q = q_data->queue_private;
+ uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+
+ q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
+ q_data->queue_stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+/* Enqueue encode burst */
+static uint16_t
+enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+ struct bbdev_queue *q = q_data->queue_private;
+ uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+
+ q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
+ q_data->queue_stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+/* Dequeue decode burst */
+static uint16_t
+dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+ struct bbdev_queue *q = q_data->queue_private;
+ uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ q_data->queue_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/* Dequeue encode burst */
+static uint16_t
+dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+ struct bbdev_queue *q = q_data->queue_private;
+ uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ q_data->queue_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/* Parse 16bit integer from string argument */
+static inline int
+parse_u16_arg(const char *key, const char *value, void *extra_args)
+{
+ uint16_t *u16 = extra_args;
+ unsigned long result;
+
+ if ((value == NULL) || (extra_args == NULL))
+ return -EINVAL;
+ errno = 0;
+ result = strtoul(value, NULL, 0);
+ if ((result >= (1 << 16)) || (errno != 0)) {
+ rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
+ return -ERANGE;
+ }
+ *u16 = (uint16_t)result;
+ return 0;
+}
+
+/* Parse parameters used to create device */
+static int
+parse_bbdev_null_params(struct bbdev_null_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args, bbdev_null_valid_params);
+ if (kvlist == NULL)
+ return -EFAULT;
+
+ ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[0],
+ &parse_u16_arg, &params->queues_num);
+ if (ret < 0)
+ goto exit;
+
+ ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[1],
+ &parse_u16_arg, &params->socket_id);
+ if (ret < 0)
+ goto exit;
+
+ if (params->socket_id >= RTE_MAX_NUMA_NODES) {
+ rte_bbdev_log(ERR, "Invalid socket, must be < %u",
+ RTE_MAX_NUMA_NODES);
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+exit:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+/* Create device */
+static int
+null_bbdev_create(struct rte_vdev_device *vdev,
+ struct bbdev_null_params *init_params)
+{
+ struct rte_bbdev *bbdev;
+ const char *name = rte_vdev_device_name(vdev);
+
+ bbdev = rte_bbdev_allocate(name);
+ if (bbdev == NULL)
+ return -ENODEV;
+
+ bbdev->data->dev_private = rte_zmalloc_socket(name,
+ sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
+ init_params->socket_id);
+ if (bbdev->data->dev_private == NULL) {
+ rte_bbdev_release(bbdev);
+ return -ENOMEM;
+ }
+
+ bbdev->dev_ops = &pmd_ops;
+ bbdev->device = &vdev->device;
+ bbdev->data->socket_id = init_params->socket_id;
+ bbdev->intr_handle = NULL;
+
+ /* register rx/tx burst functions for data path */
+ bbdev->dequeue_enc_ops = dequeue_enc_ops;
+ bbdev->dequeue_dec_ops = dequeue_dec_ops;
+ bbdev->enqueue_enc_ops = enqueue_enc_ops;
+ bbdev->enqueue_dec_ops = enqueue_dec_ops;
+ ((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
+ init_params->queues_num;
+
+ return 0;
+}
+
+/* Initialise device */
+static int
+null_bbdev_probe(struct rte_vdev_device *vdev)
+{
+ struct bbdev_null_params init_params = {
+ rte_socket_id(),
+ RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
+ };
+ const char *name;
+ const char *input_args;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+ parse_bbdev_null_params(&init_params, input_args);
+
+ rte_bbdev_log_debug("Init %s on NUMA node %d with max queues: %d",
+ name, init_params.socket_id, init_params.queues_num);
+
+ return null_bbdev_create(vdev, &init_params);
+}
+
+/* Uninitialise device */
+static int
+null_bbdev_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_bbdev *bbdev;
+ const char *name;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ bbdev = rte_bbdev_get_named_dev(name);
+ if (bbdev == NULL)
+ return -EINVAL;
+
+ rte_free(bbdev->data->dev_private);
+
+ return rte_bbdev_release(bbdev);
+}
+
+static struct rte_vdev_driver bbdev_null_pmd_drv = {
+ .probe = null_bbdev_probe,
+ .remove = null_bbdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_null_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
+ BBDEV_NULL_MAX_NB_QUEUES_ARG"=<int> "
+ BBDEV_NULL_SOCKET_ID_ARG"=<int>");
+
+RTE_INIT(null_bbdev_init_log);
+static void
+null_bbdev_init_log(void)
+{
+ bbdev_null_logtype = rte_log_register("pmd.bb.null");
+ if (bbdev_null_logtype >= 0)
+ rte_log_set_level(bbdev_null_logtype, RTE_LOG_NOTICE);
+}
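
For context: the null PMD's data path simply cycles ops through an rte_ring, which makes it a convenient baseline for measuring bbdev framework overhead. A minimal usage sketch follows; it is not part of the patch, error handling is trimmed, and the device id, queue count, and vdev argument string are illustrative assumptions.

    #include <rte_bbdev.h>
    #include <rte_bus_vdev.h>
    #include <rte_lcore.h>

    /* Minimal sketch: create a null bbdev and loop a burst of caller-
     * allocated encode ops through it. Ids and arguments are assumptions.
     */
    static int
    null_bbdev_demo(struct rte_bbdev_enc_op **ops, uint16_t burst)
    {
        uint16_t dev_id = 0, q_id = 0, sent, got = 0; /* assumes first bbdev */
        struct rte_bbdev_info info;
        struct rte_bbdev_queue_conf qconf;

        /* Same effect as --vdev=bbdev_null,max_nb_queues=4 on the EAL
         * command line; argument names match the PARAM_STRING above.
         */
        if (rte_vdev_init("bbdev_null", "max_nb_queues=4") != 0)
            return -1;

        /* Start from the driver's default queue configuration. */
        rte_bbdev_info_get(dev_id, &info);
        qconf = info.drv.default_queue_conf;

        if (rte_bbdev_setup_queues(dev_id, 1, rte_socket_id()) != 0 ||
                rte_bbdev_queue_configure(dev_id, q_id, &qconf) != 0 ||
                rte_bbdev_start(dev_id) != 0)
            return -1;

        /* The null PMD returns every op unmodified via its ring. */
        sent = rte_bbdev_enqueue_enc_ops(dev_id, q_id, ops, burst);
        while (got < sent)
            got += rte_bbdev_dequeue_enc_ops(dev_id, q_id,
                    &ops[got], sent - got);
        return 0;
    }
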
diff --git a/drivers/bbdev/null/rte_pmd_bbdev_null_version.map b/drivers/bbdev/null/rte_pmd_bbdev_null_version.map
new file mode 100644
index 00000000..58b94270
--- /dev/null
+++ b/drivers/bbdev/null/rte_pmd_bbdev_null_version.map
@@ -0,0 +1,3 @@
+DPDK_18.02 {
+ local: *;
+};
diff --git a/drivers/bbdev/turbo_sw/Makefile b/drivers/bbdev/turbo_sw/Makefile
new file mode 100644
index 00000000..79eb5547
--- /dev/null
+++ b/drivers/bbdev/turbo_sw/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(FLEXRAN_SDK),)
+$(error "Please define FLEXRAN_SDK environment variable")
+endif
+
+# library name
+LIB = librte_pmd_bbdev_turbo_sw.a
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lrte_kvargs
+LDLIBS += -lrte_bbdev
+LDLIBS += -lrte_bus_vdev
+
+# versioning export map
+EXPORT_MAP := rte_pmd_bbdev_turbo_sw_version.map
+
+# external library dependencies
+CFLAGS += -I$(FLEXRAN_SDK)/lib_common
+CFLAGS += -I$(FLEXRAN_SDK)/lib_turbo
+CFLAGS += -I$(FLEXRAN_SDK)/lib_crc
+CFLAGS += -I$(FLEXRAN_SDK)/lib_rate_matching
+
+LDLIBS += -L$(FLEXRAN_SDK)/lib_crc -lcrc
+LDLIBS += -L$(FLEXRAN_SDK)/lib_turbo -lturbo
+LDLIBS += -L$(FLEXRAN_SDK)/lib_rate_matching -lrate_matching
+LDLIBS += -L$(FLEXRAN_SDK)/lib_common -lcommon
+LDLIBS += -lstdc++ -lirc -limf -lipps
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW) += bbdev_turbo_software.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bbdev/turbo_sw/bbdev_turbo_software.c b/drivers/bbdev/turbo_sw/bbdev_turbo_software.c
new file mode 100644
index 00000000..302abf5c
--- /dev/null
+++ b/drivers/bbdev/turbo_sw/bbdev_turbo_software.c
@@ -0,0 +1,1217 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_kvargs.h>
+
+#include <rte_bbdev.h>
+#include <rte_bbdev_pmd.h>
+
+#include <phy_turbo.h>
+#include <phy_crc.h>
+#include <phy_rate_match.h>
+#include <divide.h>
+
+#define DRIVER_NAME turbo_sw
+
+/* Turbo SW PMD logging ID */
+static int bbdev_turbo_sw_logtype;
+
+/* Helper macro for logging */
+#define rte_bbdev_log(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, bbdev_turbo_sw_logtype, fmt "\n", \
+ ##__VA_ARGS__)
+
+#define rte_bbdev_log_debug(fmt, ...) \
+ rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
+ ##__VA_ARGS__)
+
+/* Number of columns in sub-block interleaver (36.212, section 5.1.4.1.1) */
+#define C_SUBBLOCK (32)
+/* Maximum LTE transport block size in bits (largest TBS in the 36.213 tables) */
+#define MAX_TB_SIZE (391656)
+/* Maximum turbo code block size K in bits (36.212) */
+#define MAX_CB_SIZE (6144)
+/* Maximum circular buffer size: Kw = 3 * 32 * ceil((K + 4) / 32) for K = 6144 */
+#define MAX_KW (18528)
+
+/* private data structure */
+struct bbdev_private {
+ unsigned int max_nb_queues; /**< Max number of queues */
+};
+
+/* Initialisation params structure that can be used by Turbo SW driver */
+struct turbo_sw_params {
+ int socket_id; /*< Turbo SW device socket */
+ uint16_t queues_num; /*< Turbo SW device queues number */
+};
+
+/* Acceptable params for Turbo SW devices */
+#define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues"
+#define TURBO_SW_SOCKET_ID_ARG "socket_id"
+
+static const char * const turbo_sw_valid_params[] = {
+ TURBO_SW_MAX_NB_QUEUES_ARG,
+ TURBO_SW_SOCKET_ID_ARG
+};
+
+/* queue */
+struct turbo_sw_queue {
+ /* Ring for processed (encoded/decoded) operations which are ready to
+ * be dequeued.
+ */
+ struct rte_ring *processed_pkts;
+ /* Stores input for turbo encoder (used when CRC attachment is
+ * performed)
+ */
+ uint8_t *enc_in;
+ /* Stores output from turbo encoder */
+ uint8_t *enc_out;
+ /* Alpha gamma buf for bblib_turbo_decoder() function */
+ int8_t *ag;
+ /* Temp buf for bblib_turbo_decoder() function */
+ uint16_t *code_block;
+ /* Input buf for bblib_rate_dematching_lte() function */
+ uint8_t *deint_input;
+ /* Output buf for bblib_rate_dematching_lte() function */
+ uint8_t *deint_output;
+ /* Output buf for bblib_turbodec_adapter_lte() function */
+ uint8_t *adapter_output;
+ /* Operation type of this queue */
+ enum rte_bbdev_op_type type;
+} __rte_cache_aligned;
+
+/* Calculate index based on Table 5.1.3-3 (turbo code internal interleaver
+ * parameters) from TS36.212
+ */
+static inline int32_t
+compute_idx(uint16_t k)
+{
+ int32_t result = 0;
+
+ if (k < 40 || k > MAX_CB_SIZE)
+ return -1;
+
+ if (k > 2048) {
+ if ((k - 2048) % 64 != 0)
+ result = -1;
+
+ result = 124 + (k - 2048) / 64;
+ } else if (k <= 512) {
+ if ((k - 40) % 8 != 0)
+ result = -1;
+
+ result = (k - 40) / 8 + 1;
+ } else if (k <= 1024) {
+ if ((k - 512) % 16 != 0)
+ result = -1;
+
+ result = 60 + (k - 512) / 16;
+ } else { /* 1024 < k <= 2048 */
+ if ((k - 1024) % 32 != 0)
+ result = -1;
+
+ result = 92 + (k - 1024) / 32;
+ }
+
+ return result;
+}
+
+/* Read flag value 0/1 from bitmap */
+static inline bool
+check_bit(uint32_t bitmap, uint32_t bitmask)
+{
+ return bitmap & bitmask;
+}
+
+/* Get device info */
+static void
+info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
+{
+ struct bbdev_private *internals = dev->data->dev_private;
+
+ static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
+ {
+ .type = RTE_BBDEV_OP_TURBO_DEC,
+ .cap.turbo_dec = {
+ .capability_flags =
+ RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
+ RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
+ RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
+ RTE_BBDEV_TURBO_CRC_TYPE_24B |
+ RTE_BBDEV_TURBO_EARLY_TERMINATION,
+ .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
+ .num_buffers_hard_out =
+ RTE_BBDEV_MAX_CODE_BLOCKS,
+ .num_buffers_soft_out = 0,
+ }
+ },
+ {
+ .type = RTE_BBDEV_OP_TURBO_ENC,
+ .cap.turbo_enc = {
+ .capability_flags =
+ RTE_BBDEV_TURBO_CRC_24B_ATTACH |
+ RTE_BBDEV_TURBO_CRC_24A_ATTACH |
+ RTE_BBDEV_TURBO_RATE_MATCH |
+ RTE_BBDEV_TURBO_RV_INDEX_BYPASS,
+ .num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
+ .num_buffers_dst = RTE_BBDEV_MAX_CODE_BLOCKS,
+ }
+ },
+ RTE_BBDEV_END_OF_CAPABILITIES_LIST()
+ };
+
+ static struct rte_bbdev_queue_conf default_queue_conf = {
+ .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
+ };
+
+ static const enum rte_cpu_flag_t cpu_flag = RTE_CPUFLAG_SSE4_2;
+
+ default_queue_conf.socket = dev->data->socket_id;
+
+ dev_info->driver_name = RTE_STR(DRIVER_NAME);
+ dev_info->max_num_queues = internals->max_nb_queues;
+ dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
+ dev_info->hardware_accelerated = false;
+ dev_info->max_queue_priority = 0;
+ dev_info->default_queue_conf = default_queue_conf;
+ dev_info->capabilities = bbdev_capabilities;
+ dev_info->cpu_flag_reqs = &cpu_flag;
+ dev_info->min_alignment = 64;
+
+ rte_bbdev_log_debug("got device info from %u\n", dev->data->dev_id);
+}
+
+/* Release queue */
+static int
+q_release(struct rte_bbdev *dev, uint16_t q_id)
+{
+ struct turbo_sw_queue *q = dev->data->queues[q_id].queue_private;
+
+ if (q != NULL) {
+ rte_ring_free(q->processed_pkts);
+ rte_free(q->enc_out);
+ rte_free(q->enc_in);
+ rte_free(q->ag);
+ rte_free(q->code_block);
+ rte_free(q->deint_input);
+ rte_free(q->deint_output);
+ rte_free(q->adapter_output);
+ rte_free(q);
+ dev->data->queues[q_id].queue_private = NULL;
+ }
+
+ rte_bbdev_log_debug("released device queue %u:%u",
+ dev->data->dev_id, q_id);
+ return 0;
+}
+
+/* Setup a queue */
+static int
+q_setup(struct rte_bbdev *dev, uint16_t q_id,
+ const struct rte_bbdev_queue_conf *queue_conf)
+{
+ int ret;
+ struct turbo_sw_queue *q;
+ char name[RTE_RING_NAMESIZE];
+
+ /* Allocate the queue data structure. */
+ q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q == NULL) {
+ rte_bbdev_log(ERR, "Failed to allocate queue memory");
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for encoder output. */
+ ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_out%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ ret = -ENAMETOOLONG;
+ goto free_q;
+ }
+ q->enc_out = rte_zmalloc_socket(name,
+ ((MAX_TB_SIZE >> 3) + 3) * sizeof(*q->enc_out) * 3,
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->enc_out == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ ret = -ENOMEM;
+ goto free_q;
+ }
+
+ /* Allocate memory for rate matching output. */
+ ret = snprintf(name, RTE_RING_NAMESIZE,
+ RTE_STR(DRIVER_NAME)"_enc_in%u:%u", dev->data->dev_id,
+ q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ ret = -ENAMETOOLONG;
+ goto free_q;
+ }
+ q->enc_in = rte_zmalloc_socket(name,
+ (MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->enc_in == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ ret = -ENOMEM;
+ goto free_q;
+ }
+
+ /* Allocate memory for Alpha Gamma temp buffer. */
+ ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_ag%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ ret = -ENAMETOOLONG;
+ goto free_q;
+ }
+ q->ag = rte_zmalloc_socket(name,
+ MAX_CB_SIZE * 10 * sizeof(*q->ag),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->ag == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ ret = -ENOMEM;
+ goto free_q;
+ }
+
+ /* Allocate memory for code block temp buffer. */
+ ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_cb%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ ret = -ENAMETOOLONG;
+ goto free_q;
+ }
+ q->code_block = rte_zmalloc_socket(name,
+ (MAX_CB_SIZE >> 3) * sizeof(*q->code_block),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->code_block == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ ret = -ENOMEM;
+ goto free_q;
+ }
+
+ /* Allocate memory for Deinterleaver input. */
+ ret = snprintf(name, RTE_RING_NAMESIZE,
+ RTE_STR(DRIVER_NAME)"_deint_input%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ ret = -ENAMETOOLONG;
+ goto free_q;
+ }
+ q->deint_input = rte_zmalloc_socket(name,
+ MAX_KW * sizeof(*q->deint_input),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->deint_input == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ ret = -ENOMEM;
+ goto free_q;
+ }
+
+ /* Allocate memory for Deinterleaver output. */
+ ret = snprintf(name, RTE_RING_NAMESIZE,
+ RTE_STR(DRIVER_NAME)"_deint_output%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ ret = -ENAMETOOLONG;
+ goto free_q;
+ }
+ q->deint_output = rte_zmalloc_socket(name,
+ MAX_KW * sizeof(*q->deint_output),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->deint_output == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ ret = -ENOMEM;
+ goto free_q;
+ }
+
+ /* Allocate memory for Adapter output. */
+ ret = snprintf(name, RTE_RING_NAMESIZE,
+ RTE_STR(DRIVER_NAME)"_adapter_output%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ ret = -ENAMETOOLONG;
+ goto free_q;
+ }
+ q->adapter_output = rte_zmalloc_socket(name,
+ MAX_CB_SIZE * 6 * sizeof(*q->adapter_output),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q->adapter_output == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate queue memory for %s", name);
+ ret = -ENOMEM;
+ goto free_q;
+ }
+
+ /* Create ring for packets awaiting dequeue. */
+ ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"%u:%u",
+ dev->data->dev_id, q_id);
+ if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
+ rte_bbdev_log(ERR,
+ "Creating queue name for device %u queue %u failed",
+ dev->data->dev_id, q_id);
+ ret = -ENAMETOOLONG;
+ goto free_q;
+ }
+ q->processed_pkts = rte_ring_create(name, queue_conf->queue_size,
+ queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (q->processed_pkts == NULL) {
+ rte_bbdev_log(ERR, "Failed to create ring for %s", name);
+ ret = -ENOMEM;
+ goto free_q;
+ }
+
+ q->type = queue_conf->op_type;
+
+ dev->data->queues[q_id].queue_private = q;
+ rte_bbdev_log_debug("setup device queue %s", name);
+ return 0;
+
+free_q:
+ rte_ring_free(q->processed_pkts);
+ rte_free(q->enc_out);
+ rte_free(q->enc_in);
+ rte_free(q->ag);
+ rte_free(q->code_block);
+ rte_free(q->deint_input);
+ rte_free(q->deint_output);
+ rte_free(q->adapter_output);
+ rte_free(q);
+ return ret;
+}
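+
+/* Editor's sketch of how an application reaches q_setup() through the
+ * public bbdev API (names as in the 18.02 rte_bbdev.h; illustrative only):
+ */
+#if 0 /* illustrative only, not compiled */
+ struct rte_bbdev_info info;
+ struct rte_bbdev_queue_conf conf = {0};
+
+ rte_bbdev_info_get(dev_id, &info);
+ conf.socket = info.socket_id;
+ conf.queue_size = info.drv.queue_size_lim;
+ conf.op_type = RTE_BBDEV_OP_TURBO_ENC;
+ rte_bbdev_setup_queues(dev_id, 1, info.socket_id);
+ rte_bbdev_queue_configure(dev_id, 0, &conf);
+ rte_bbdev_start(dev_id);
+#endif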
+
+static const struct rte_bbdev_ops pmd_ops = {
+ .info_get = info_get,
+ .queue_setup = q_setup,
+ .queue_release = q_release
+};
+
+/* Checks if the encoder input buffer is correct.
+ * Returns 0 if it's valid, -1 otherwise.
+ */
+static inline int
+is_enc_input_valid(const uint16_t k, const int32_t k_idx,
+ const uint16_t in_length)
+{
+ if (k_idx < 0) {
+ rte_bbdev_log(ERR, "K Index is invalid");
+ return -1;
+ }
+
+ if (in_length - (k >> 3) < 0) {
+ rte_bbdev_log(ERR,
+ "Mismatch between input length (%u bytes) and K (%u bits)",
+ in_length, k);
+ return -1;
+ }
+
+ if (k > MAX_CB_SIZE) {
+ rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
+ k, MAX_CB_SIZE);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Checks if the decoder input buffer is correct.
+ * Returns 0 if it's valid, -1 otherwise.
+ */
+static inline int
+is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
+{
+ if (k_idx < 0) {
+ rte_bbdev_log(ERR, "K index is invalid");
+ return -1;
+ }
+
+ if (in_length - kw < 0) {
+ rte_bbdev_log(ERR,
+ "Mismatch between input length (%u) and kw (%u)",
+ in_length, kw);
+ return -1;
+ }
+
+ if (kw > MAX_KW) {
+ rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d",
+ kw, MAX_KW);
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline void
+process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
+ uint8_t cb_idx, uint8_t c, uint16_t k, uint16_t ncb,
+ uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out,
+ uint16_t in_offset, uint16_t out_offset, uint16_t total_left)
+{
+ int ret;
+ int16_t k_idx;
+ uint16_t m;
+ uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out;
+ struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
+ struct bblib_crc_request crc_req;
+ struct bblib_turbo_encoder_request turbo_req;
+ struct bblib_turbo_encoder_response turbo_resp;
+ struct bblib_rate_match_dl_request rm_req;
+ struct bblib_rate_match_dl_response rm_resp;
+
+ k_idx = compute_idx(k);
+ in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
+
+ /* CRC24A (for TB) */
+ if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH) &&
+ (enc->code_block_mode == 1)) {
+ ret = is_enc_input_valid(k - 24, k_idx, total_left);
+ if (ret != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+ /* copy the input to the temporary buffer to be able to extend
+ * it by 3 CRC bytes
+ */
+ rte_memcpy(q->enc_in, in, (k - 24) >> 3);
+ crc_req.data = q->enc_in;
+ crc_req.len = (k - 24) >> 3;
+ if (bblib_lte_crc24a_gen(&crc_req) == -1) {
+ op->status |= 1 << RTE_BBDEV_CRC_ERROR;
+ rte_bbdev_log(ERR, "CRC24a generation failed");
+ return;
+ }
+ in = q->enc_in;
+ } else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) {
+ /* CRC24B */
+ ret = is_enc_input_valid(k - 24, k_idx, total_left);
+ if (ret != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+ /* copy the input to the temporary buffer to be able to extend
+ * it by 3 CRC bytes
+ */
+ rte_memcpy(q->enc_in, in, (k - 24) >> 3);
+ crc_req.data = q->enc_in;
+ crc_req.len = (k - 24) >> 3;
+ if (bblib_lte_crc24b_gen(&crc_req) == -1) {
+ op->status |= 1 << RTE_BBDEV_CRC_ERROR;
+ rte_bbdev_log(ERR, "CRC24b generation failed");
+ return;
+ }
+ in = q->enc_in;
+ } else {
+ ret = is_enc_input_valid(k, k_idx, total_left);
+ if (ret != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+ }
+
+ /* Turbo encoder */
+
+ /* Each output stream from the turbo encoder is (k+4) bits long, i.e.
+ * input length + 4 tail bits. That's (k/8) + 1 bytes after rounding up.
+ * So dst_data's length should be 3*(k/8) + 3 bytes.
+ */
+ out0 = q->enc_out;
+ out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
+ out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);
+
+ turbo_req.case_id = k_idx;
+ turbo_req.input_win = in;
+ turbo_req.length = k >> 3;
+ turbo_resp.output_win_0 = out0;
+ turbo_resp.output_win_1 = out1;
+ turbo_resp.output_win_2 = out2;
+ if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) {
+ op->status |= 1 << RTE_BBDEV_DRV_ERROR;
+ rte_bbdev_log(ERR, "Turbo Encoder failed");
+ return;
+ }
+
+ /* Rate-matching */
+ if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
+ /* get output data starting address */
+ rm_out = (uint8_t *)rte_pktmbuf_append(m_out, (e >> 3));
+ if (rm_out == NULL) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR,
+ "Too little space in output mbuf");
+ return;
+ }
+ /* rte_bbdev_op_data.offset can be different than the offset
+ * of the appended bytes
+ */
+ rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
+
+ /* index of current code block */
+ rm_req.r = cb_idx;
+ /* total number of code blocks */
+ rm_req.C = c;
+ /* For DL - 1, UL - 0 */
+ rm_req.direction = 1;
+ /* According to 3GPP TS 36.212 section 5.1.4.1.2, Nsoft, KMIMO
+ * and MDL_HARQ are used for the Ncb calculation. As Ncb is
+ * already known we can adjust those parameters
+ */
+ rm_req.Nsoft = ncb * rm_req.C;
+ rm_req.KMIMO = 1;
+ rm_req.MDL_HARQ = 1;
+ /* According to 3GPP TS 36.212 section 5.1.4.1.2, Nl, Qm and G
+ * are used for the E calculation. As E is already known we can
+ * adjust those parameters
+ */
+ rm_req.NL = e;
+ rm_req.Qm = 1;
+ rm_req.G = rm_req.NL * rm_req.Qm * rm_req.C;
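+ /* Editor's note: with the substitutions above the 36.212
+ * formulas collapse to
+ * Ncb = Nsoft / (C * KMIMO * MDL_HARQ) = ncb
+ * E = NL * Qm * (G / (NL * Qm * C)) = e
+ * so the already-known values pass through unchanged.
+ */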
+
+ rm_req.rvidx = enc->rv_index;
+ rm_req.Kidx = k_idx - 1;
+ rm_req.nLen = k + 4;
+ rm_req.tin0 = out0;
+ rm_req.tin1 = out1;
+ rm_req.tin2 = out2;
+ rm_resp.output = rm_out;
+ rm_resp.OutputLen = (e >> 3);
+ if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS)
+ rm_req.bypass_rvidx = 1;
+ else
+ rm_req.bypass_rvidx = 0;
+
+ if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) {
+ op->status |= 1 << RTE_BBDEV_DRV_ERROR;
+ rte_bbdev_log(ERR, "Rate matching failed");
+ return;
+ }
+ enc->output.length += rm_resp.OutputLen;
+ } else {
+ /* Rate matching is bypassed */
+
+ /* Completing last byte of out0 (where 4 tail bits are stored)
+ * by moving first 4 bits from out1
+ */
+ tmp_out = (uint8_t *) --out1;
+ *tmp_out = *tmp_out | ((*(tmp_out + 1) & 0xF0) >> 4);
+ tmp_out++;
+ /* Shifting out1 data by 4 bits to the left */
+ for (m = 0; m < k >> 3; ++m) {
+ uint8_t *first = tmp_out;
+ uint8_t second = *(tmp_out + 1);
+ *first = (*first << 4) | ((second & 0xF0) >> 4);
+ tmp_out++;
+ }
+ /* Shifting out2 data by 8 bits to the left */
+ for (m = 0; m < (k >> 3) + 1; ++m) {
+ *tmp_out = *(tmp_out + 1);
+ tmp_out++;
+ }
+ *tmp_out = 0;
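+
+ /* The three streams now sit back to back as one bitstream of
+ * 3 * (k + 4) bits; with k a multiple of 8 that is
+ * (k >> 3) * 3 + 2 bytes, the length appended below.
+ */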
+
+ /* copy shifted output to turbo_enc entity */
+ out0 = (uint8_t *)rte_pktmbuf_append(m_out,
+ (k >> 3) * 3 + 2);
+ if (out0 == NULL) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR,
+ "Too little space in output mbuf");
+ return;
+ }
+ enc->output.length += (k >> 3) * 3 + 2;
+ /* rte_bbdev_op_data.offset can be different than the
+ * offset of the appended bytes
+ */
+ out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
+ out_offset);
+ rte_memcpy(out0, q->enc_out, (k >> 3) * 3 + 2);
+ }
+}
+
+static inline void
+enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
+{
+ uint8_t c, r, crc24_bits = 0;
+ uint16_t k, ncb;
+ uint32_t e;
+ struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
+ uint16_t in_offset = enc->input.offset;
+ uint16_t out_offset = enc->output.offset;
+ struct rte_mbuf *m_in = enc->input.data;
+ struct rte_mbuf *m_out = enc->output.data;
+ uint16_t total_left = enc->input.length;
+
+ /* Clear op status */
+ op->status = 0;
+
+ if (total_left > MAX_TB_SIZE >> 3) {
+ rte_bbdev_log(ERR, "TB size (%u) is too big, max: %d",
+ total_left, MAX_TB_SIZE);
+ op->status = 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+
+ if (m_in == NULL || m_out == NULL) {
+ rte_bbdev_log(ERR, "Invalid mbuf pointer");
+ op->status = 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+
+ if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) ||
+ (enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH))
+ crc24_bits = 24;
+
+ if (enc->code_block_mode == 0) { /* For Transport Block mode */
+ c = enc->tb_params.c;
+ r = enc->tb_params.r;
+ } else {/* For Code Block mode */
+ c = 1;
+ r = 0;
+ }
+
+ while (total_left > 0 && r < c) {
+ if (enc->code_block_mode == 0) {
+ k = (r < enc->tb_params.c_neg) ?
+ enc->tb_params.k_neg : enc->tb_params.k_pos;
+ ncb = (r < enc->tb_params.c_neg) ?
+ enc->tb_params.ncb_neg : enc->tb_params.ncb_pos;
+ e = (r < enc->tb_params.cab) ?
+ enc->tb_params.ea : enc->tb_params.eb;
+ } else {
+ k = enc->cb_params.k;
+ ncb = enc->cb_params.ncb;
+ e = enc->cb_params.e;
+ }
+
+ process_enc_cb(q, op, r, c, k, ncb, e, m_in,
+ m_out, in_offset, out_offset, total_left);
+ /* Update total_left */
+ total_left -= (k - crc24_bits) >> 3;
+ /* Update offsets for next CBs (if exist) */
+ in_offset += (k - crc24_bits) >> 3;
+ if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH)
+ out_offset += e >> 3;
+ else
+ out_offset += (k >> 3) * 3 + 2;
+ r++;
+ }
+
+ /* check if all input data was processed */
+ if (total_left != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR,
+ "Mismatch between mbuf length and included CBs sizes");
+ }
+}
+
+static inline uint16_t
+enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; ++i)
+ enqueue_enc_one_op(q, ops[i]);
+
+ return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
+ NULL);
+}
+
+/* Remove the padding bytes from a cyclic buffer.
+ * The input buffer is a data stream wk as described in 3GPP TS 36.212 section
+ * 5.1.4.1.2 starting from w0 and with length Ncb bytes.
+ * The output buffer is a data stream wk with pruned padding bytes. Its length
+ * is 3*D bytes and the order of non-padding bytes is preserved.
+ */
+static inline void
+remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
+ uint16_t ncb)
+{
+ uint32_t in_idx, out_idx, c_idx;
+ const uint32_t d = k + 4;
+ const uint32_t kw = (ncb / 3);
+ const uint32_t nd = kw - d;
+ const uint32_t r_subblock = kw / C_SUBBLOCK;
+ /* Inter-column permutation pattern */
+ const uint32_t P[C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10,
+ 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13, 29, 3, 19,
+ 11, 27, 7, 23, 15, 31};
+ in_idx = 0;
+ out_idx = 0;
+
+ /* The padding bytes are at the first Nd positions in the first row. */
+ for (c_idx = 0; in_idx < kw; in_idx += r_subblock, ++c_idx) {
+ if (P[c_idx] < nd) {
+ rte_memcpy(&out[out_idx], &in[in_idx + 1],
+ r_subblock - 1);
+ out_idx += r_subblock - 1;
+ } else {
+ rte_memcpy(&out[out_idx], &in[in_idx], r_subblock);
+ out_idx += r_subblock;
+ }
+ }
+
+ /* First and second parity bits sub-blocks are interlaced. */
+ for (c_idx = 0; in_idx < ncb - 2 * r_subblock;
+ in_idx += 2 * r_subblock, ++c_idx) {
+ uint32_t second_block_c_idx = P[c_idx];
+ uint32_t third_block_c_idx = P[c_idx] + 1;
+
+ if (second_block_c_idx < nd && third_block_c_idx < nd) {
+ rte_memcpy(&out[out_idx], &in[in_idx + 2],
+ 2 * r_subblock - 2);
+ out_idx += 2 * r_subblock - 2;
+ } else if (second_block_c_idx >= nd &&
+ third_block_c_idx >= nd) {
+ rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock);
+ out_idx += 2 * r_subblock;
+ } else if (second_block_c_idx < nd) {
+ out[out_idx++] = in[in_idx];
+ rte_memcpy(&out[out_idx], &in[in_idx + 2],
+ 2 * r_subblock - 2);
+ out_idx += 2 * r_subblock - 2;
+ } else {
+ rte_memcpy(&out[out_idx], &in[in_idx + 1],
+ 2 * r_subblock - 1);
+ out_idx += 2 * r_subblock - 1;
+ }
+ }
+
+ /* Last interlaced row is different - its last byte is the only padding
+ * byte. We can have from 2 up to 26 padding bytes (Nd) per sub-block.
+ * After interlacing the 1st and 2nd parity sub-blocks we can have 0, 1
+ * or 2 padding bytes each time we make a step of 2 * R_SUBBLOCK bytes
+ * (moving to another column). 2nd parity sub-block uses the same
+ * inter-column permutation pattern as the systematic and 1st parity
+ * sub-blocks but it adds '1' to the resulting index and calculates the
+ * modulus of the result and Kw. Last column is mapped to itself (id 31)
+ * so the first byte taken from the 2nd parity sub-block will be the
+ * 32nd (31+1) byte, then 64th etc. (step is C_SUBBLOCK == 32) and the
+ * last byte will be the first byte from the sub-block:
+ * (32 + 32 * (R_SUBBLOCK-1)) % Kw == Kw % Kw == 0. Nd can't be smaller
+ * than 2 so we know that bytes with ids 0 and 1 must be the padding
+ * bytes. The bytes from the 1st parity sub-block are the bytes from the
+ * 31st column - Nd can't be greater than 26 so we are sure that there
+ * are no padding bytes in 31st column.
+ */
+ rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock - 1);
+}
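+
+/* Editor's example for the smallest code block, k = 40: D = 44, Kpi = 64,
+ * R_SUBBLOCK = 2 and Nd = 20, so the buffer shrinks from Ncb = 192 bytes
+ * to 3 * D = 132 bytes with the order of the kept bytes preserved.
+ */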
+
+static inline void
+move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k,
+ uint16_t ncb)
+{
+ uint16_t d = k + 4;
+ uint16_t kpi = ncb / 3;
+ uint16_t nd = kpi - d;
+
+ rte_memcpy(&out[nd], in, d);
+ rte_memcpy(&out[nd + kpi + 64], &in[kpi], d);
+ rte_memcpy(&out[nd + 2 * (kpi + 64)], &in[2 * kpi], d);
+}
+
+static inline void
+process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
+ uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in,
+ struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
+ bool check_crc_24b, uint16_t total_left)
+{
+ int ret;
+ int32_t k_idx;
+ int32_t iter_cnt;
+ uint8_t *in, *out, *adapter_input;
+ int32_t ncb, ncb_without_null;
+ struct bblib_turbo_adapter_ul_response adapter_resp;
+ struct bblib_turbo_adapter_ul_request adapter_req;
+ struct bblib_turbo_decoder_request turbo_req;
+ struct bblib_turbo_decoder_response turbo_resp;
+ struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
+
+ k_idx = compute_idx(k);
+
+ ret = is_dec_input_valid(k_idx, kw, total_left);
+ if (ret != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+
+ in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
+ ncb = kw;
+ ncb_without_null = (k + 4) * 3;
+
+ if (check_bit(dec->op_flags, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE)) {
+ struct bblib_deinterleave_ul_request deint_req;
+ struct bblib_deinterleave_ul_response deint_resp;
+
+ /* SW decoder accepts only a circular buffer without NULL bytes
+ * so the input needs to be converted.
+ */
+ remove_nulls_from_circular_buf(in, q->deint_input, k, ncb);
+
+ deint_req.pharqbuffer = q->deint_input;
+ deint_req.ncb = ncb_without_null;
+ deint_resp.pinteleavebuffer = q->deint_output;
+ bblib_deinterleave_ul(&deint_req, &deint_resp);
+ } else
+ move_padding_bytes(in, q->deint_output, k, ncb);
+
+ adapter_input = q->deint_output;
+
+ if (dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN)
+ adapter_req.isinverted = 1;
+ else if (dec->op_flags & RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)
+ adapter_req.isinverted = 0;
+ else {
+ op->status |= 1 << RTE_BBDEV_DRV_ERROR;
+ rte_bbdev_log(ERR, "LLR format wasn't specified");
+ return;
+ }
+
+ adapter_req.ncb = ncb_without_null;
+ adapter_req.pinteleavebuffer = adapter_input;
+ adapter_resp.pharqout = q->adapter_output;
+ bblib_turbo_adapter_ul(&adapter_req, &adapter_resp);
+
+ out = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3));
+ if (out == NULL) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR, "Too little space in output mbuf");
+ return;
+ }
+ /* rte_bbdev_op_data.offset can be different than the offset of the
+ * appended bytes
+ */
+ out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
+ if (check_crc_24b)
+ turbo_req.c = c + 1;
+ else
+ turbo_req.c = c;
+ turbo_req.input = (int8_t *)q->adapter_output;
+ turbo_req.k = k;
+ turbo_req.k_idx = k_idx;
+ turbo_req.max_iter_num = dec->iter_max;
+ turbo_resp.ag_buf = q->ag;
+ turbo_resp.cb_buf = q->code_block;
+ turbo_resp.output = out;
+ iter_cnt = bblib_turbo_decoder(&turbo_req, &turbo_resp);
+ dec->hard_output.length += (k >> 3);
+
+ if (iter_cnt > 0) {
+ /* Temporary solution for returned iter_count from SDK */
+ iter_cnt = (iter_cnt - 1) / 2;
+ dec->iter_count = RTE_MAX(iter_cnt, dec->iter_count);
+ } else {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR, "Turbo Decoder failed");
+ return;
+ }
+}
+
+static inline void
+enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
+{
+ uint8_t c, r = 0;
+ uint16_t kw, k = 0;
+ struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
+ struct rte_mbuf *m_in = dec->input.data;
+ struct rte_mbuf *m_out = dec->hard_output.data;
+ uint16_t in_offset = dec->input.offset;
+ uint16_t total_left = dec->input.length;
+ uint16_t out_offset = dec->hard_output.offset;
+
+ /* Clear op status */
+ op->status = 0;
+
+ if (m_in == NULL || m_out == NULL) {
+ rte_bbdev_log(ERR, "Invalid mbuf pointer");
+ op->status = 1 << RTE_BBDEV_DATA_ERROR;
+ return;
+ }
+
+ if (dec->code_block_mode == 0) { /* For Transport Block mode */
+ c = dec->tb_params.c;
+ } else { /* For Code Block mode */
+ k = dec->cb_params.k;
+ c = 1;
+ }
+
+ while (total_left > 0) {
+ if (dec->code_block_mode == 0)
+ k = (r < dec->tb_params.c_neg) ?
+ dec->tb_params.k_neg : dec->tb_params.k_pos;
+
+ /* Calculates circular buffer size (Kw).
+ * According to 3gpp 36.212 section 5.1.4.2
+ * Kw = 3 * Kpi,
+ * where:
+ * Kpi = nCol * nRow
+ * where nCol is 32 and nRow can be calculated from:
+ * D =< nCol * nRow
+ * where D is the size of each output from turbo encoder block
+ * (k + 4).
+ */
+ kw = RTE_ALIGN_CEIL(k + 4, C_SUBBLOCK) * 3;
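+ /* Editor's example: for the largest CB, k = 6144, this gives
+ * kw = 3 * RTE_ALIGN_CEIL(6148, 32) = 18528, i.e. MAX_KW.
+ */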
+
+ process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset,
+ out_offset, check_bit(dec->op_flags,
+ RTE_BBDEV_TURBO_CRC_TYPE_24B), total_left);
+ /* As a result of decoding we get Code Block with included
+ * decoded CRC24 at the end of Code Block. Type of CRC24 is
+ * specified by flag.
+ */
+
+ /* Update total_left */
+ total_left -= kw;
+ /* Update offsets for next CBs (if exist) */
+ in_offset += kw;
+ out_offset += (k >> 3);
+ r++;
+ }
+ if (total_left != 0) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR,
+ "Mismatch between mbuf length and included Circular buffer sizes");
+ }
+}
+
+static inline uint16_t
+enqueue_dec_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_dec_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; ++i)
+ enqueue_dec_one_op(q, ops[i]);
+
+ return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
+ NULL);
+}
+
+/* Enqueue burst */
+static uint16_t
+enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+ void *queue = q_data->queue_private;
+ struct turbo_sw_queue *q = queue;
+ uint16_t nb_enqueued = 0;
+
+ nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops);
+
+ q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
+ q_data->queue_stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+/* Enqueue burst */
+static uint16_t
+enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+ void *queue = q_data->queue_private;
+ struct turbo_sw_queue *q = queue;
+ uint16_t nb_enqueued = 0;
+
+ nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops);
+
+ q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
+ q_data->queue_stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+/* Dequeue decode burst */
+static uint16_t
+dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+ struct turbo_sw_queue *q = q_data->queue_private;
+ uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ q_data->queue_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/* Dequeue encode burst */
+static uint16_t
+dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+ struct turbo_sw_queue *q = q_data->queue_private;
+ uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ q_data->queue_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
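+
+/* Editor's sketch of the application-side data path: ops are processed
+ * synchronously at enqueue time and parked on processed_pkts until
+ * dequeued (names as in the 18.02 rte_bbdev.h; illustrative only):
+ */
+#if 0 /* illustrative only, not compiled */
+ uint16_t enq, deq = 0;
+
+ /* ops[] drawn beforehand from an RTE_BBDEV_OP_TURBO_ENC op pool */
+ enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, ops, burst_size);
+ while (deq < enq)
+ deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
+ &ops[deq], enq - deq);
+#endif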
+
+/* Parse 16-bit integer from string argument */
+static inline int
+parse_u16_arg(const char *key, const char *value, void *extra_args)
+{
+ uint16_t *u16 = extra_args;
+ unsigned long result;
+
+ if ((value == NULL) || (extra_args == NULL))
+ return -EINVAL;
+ errno = 0;
+ result = strtoul(value, NULL, 0);
+ if ((result >= (1 << 16)) || (errno != 0)) {
+ rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
+ return -ERANGE;
+ }
+ *u16 = (uint16_t)result;
+ return 0;
+}
+
+/* Parse parameters used to create device */
+static int
+parse_turbo_sw_params(struct turbo_sw_params *params, const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args, turbo_sw_valid_params);
+ if (kvlist == NULL)
+ return -EFAULT;
+
+ ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[0],
+ &parse_u16_arg, &params->queues_num);
+ if (ret < 0)
+ goto exit;
+
+ ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[1],
+ &parse_u16_arg, &params->socket_id);
+ if (ret < 0)
+ goto exit;
+
+ if (params->socket_id >= RTE_MAX_NUMA_NODES) {
+ rte_bbdev_log(ERR, "Invalid socket, must be < %u",
+ RTE_MAX_NUMA_NODES);
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+exit:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ return ret;
+}
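+
+/* Editor's note: these kvargs come from the vdev argument string, e.g. the
+ * EAL option (illustrative values)
+ * --vdev=turbo_sw,max_nb_queues=8,socket_id=0
+ * or, at runtime, rte_vdev_init("turbo_sw", "max_nb_queues=8,socket_id=0").
+ */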
+
+/* Create device */
+static int
+turbo_sw_bbdev_create(struct rte_vdev_device *vdev,
+ struct turbo_sw_params *init_params)
+{
+ struct rte_bbdev *bbdev;
+ const char *name = rte_vdev_device_name(vdev);
+
+ bbdev = rte_bbdev_allocate(name);
+ if (bbdev == NULL)
+ return -ENODEV;
+
+ bbdev->data->dev_private = rte_zmalloc_socket(name,
+ sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
+ init_params->socket_id);
+ if (bbdev->data->dev_private == NULL) {
+ rte_bbdev_release(bbdev);
+ return -ENOMEM;
+ }
+
+ bbdev->dev_ops = &pmd_ops;
+ bbdev->device = &vdev->device;
+ bbdev->data->socket_id = init_params->socket_id;
+ bbdev->intr_handle = NULL;
+
+ /* register rx/tx burst functions for data path */
+ bbdev->dequeue_enc_ops = dequeue_enc_ops;
+ bbdev->dequeue_dec_ops = dequeue_dec_ops;
+ bbdev->enqueue_enc_ops = enqueue_enc_ops;
+ bbdev->enqueue_dec_ops = enqueue_dec_ops;
+ ((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
+ init_params->queues_num;
+
+ return 0;
+}
+
+/* Initialise device */
+static int
+turbo_sw_bbdev_probe(struct rte_vdev_device *vdev)
+{
+ struct turbo_sw_params init_params = {
+ rte_socket_id(),
+ RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
+ };
+ const char *name;
+ const char *input_args;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+ parse_turbo_sw_params(&init_params, input_args);
+
+ rte_bbdev_log_debug(
+ "Initialising %s on NUMA node %d with max queues: %d",
+ name, init_params.socket_id, init_params.queues_num);
+
+ return turbo_sw_bbdev_create(vdev, &init_params);
+}
+
+/* Uninitialise device */
+static int
+turbo_sw_bbdev_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_bbdev *bbdev;
+ const char *name;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ bbdev = rte_bbdev_get_named_dev(name);
+ if (bbdev == NULL)
+ return -EINVAL;
+
+ rte_free(bbdev->data->dev_private);
+
+ return rte_bbdev_release(bbdev);
+}
+
+static struct rte_vdev_driver bbdev_turbo_sw_pmd_drv = {
+ .probe = turbo_sw_bbdev_probe,
+ .remove = turbo_sw_bbdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
+ TURBO_SW_MAX_NB_QUEUES_ARG"=<int> "
+ TURBO_SW_SOCKET_ID_ARG"=<int>");
+
+RTE_INIT(turbo_sw_bbdev_init_log);
+static void
+turbo_sw_bbdev_init_log(void)
+{
+ bbdev_turbo_sw_logtype = rte_log_register("pmd.bb.turbo_sw");
+ if (bbdev_turbo_sw_logtype >= 0)
+ rte_log_set_level(bbdev_turbo_sw_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map b/drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
new file mode 100644
index 00000000..58b94270
--- /dev/null
+++ b/drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
@@ -0,0 +1,3 @@
+DPDK_18.02 {
+ local: *;
+};
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index c20beb43..7ef25939 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright 2016 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of NXP nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/bus/dpaa/Makefile b/drivers/bus/dpaa/Makefile
index f672f540..bffaa9d9 100644
--- a/drivers/bus/dpaa/Makefile
+++ b/drivers/bus/dpaa/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright 2016 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of NXP nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
include $(RTE_SDK)/mk/rte.vars.mk
RTE_BUS_DPAA=$(RTE_SDK)/drivers/bus/dpaa
@@ -36,6 +9,7 @@ RTE_BUS_DPAA=$(RTE_SDK)/drivers/bus/dpaa
#
LIB = librte_bus_dpaa.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS := -I$(SRCDIR) $(CFLAGS)
CFLAGS += -O3 $(WERROR_FLAGS)
CFLAGS += -Wno-pointer-arith
diff --git a/drivers/bus/dpaa/base/fman/fman.c b/drivers/bus/dpaa/base/fman/fman.c
index 3816dba5..bda62e01 100644
--- a/drivers/bus/dpaa/base/fman/fman.c
+++ b/drivers/bus/dpaa/base/fman/fman.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2010-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
@@ -599,7 +566,7 @@ fman_finish(void)
/* release the mapping */
_errno = munmap(__if->ccsr_map, __if->regs_size);
if (unlikely(_errno < 0))
- fprintf(stderr, "%s:%hu:%s(): munmap() = %d (%s)\n",
+ fprintf(stderr, "%s:%d:%s(): munmap() = %d (%s)\n",
__FILE__, __LINE__, __func__,
-errno, strerror(errno));
printf("Tearing down %s\n", __if->node_path);
diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 077c17c0..0148b98e 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -1,30 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
diff --git a/drivers/bus/dpaa/base/fman/netcfg_layer.c b/drivers/bus/dpaa/base/fman/netcfg_layer.c
index 26cff84a..3e956ce1 100644
--- a/drivers/bus/dpaa/base/fman/netcfg_layer.c
+++ b/drivers/bus/dpaa/base/fman/netcfg_layer.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2010-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <inttypes.h>
#include <of.h>
@@ -109,22 +76,6 @@ dump_netcfg(struct netcfg_info *cfg_ptr)
}
#endif /* RTE_LIBRTE_DPAA_DEBUG_DRIVER */
-static inline int
-get_num_netcfg_interfaces(char *str)
-{
- char *pch;
- uint8_t count = 0;
-
- if (str == NULL)
- return -EINVAL;
- pch = strtok(str, ",");
- while (pch != NULL) {
- count++;
- pch = strtok(NULL, ",");
- }
- return count;
-}
-
struct netcfg_info *
netcfg_acquire(void)
{
diff --git a/drivers/bus/dpaa/base/fman/of.c b/drivers/bus/dpaa/base/fman/of.c
index b2d7c020..1b2dbe26 100644
--- a/drivers/bus/dpaa/base/fman/of.c
+++ b/drivers/bus/dpaa/base/fman/of.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2010-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <of.h>
diff --git a/drivers/bus/dpaa/base/qbman/bman.c b/drivers/bus/dpaa/base/qbman/bman.c
index 0480caa9..8a629073 100644
--- a/drivers/bus/dpaa/base/qbman/bman.c
+++ b/drivers/bus/dpaa/base/qbman/bman.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include "bman.h"
diff --git a/drivers/bus/dpaa/base/qbman/bman.h b/drivers/bus/dpaa/base/qbman/bman.h
index 4b088da9..21a6bee7 100644
--- a/drivers/bus/dpaa/base/qbman/bman.h
+++ b/drivers/bus/dpaa/base/qbman/bman.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2010-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
+ * Copyright 2017 NXP
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __BMAN_H
@@ -228,7 +195,9 @@ static inline void bm_rcr_finish(struct bm_portal *portal)
u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(!rcr->busy);
+#endif
if (pi != RCR_PTR2IDX(rcr->cursor))
pr_crit("losing uncommitted RCR entries\n");
if (ci != rcr->ci)
@@ -241,7 +210,9 @@ static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
register struct bm_rcr *rcr = &portal->rcr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(!rcr->busy);
+#endif
if (!rcr->available)
return NULL;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
@@ -255,8 +226,8 @@ static inline void bm_rcr_abort(struct bm_portal *portal)
{
__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
- DPAA_ASSERT(rcr->busy);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(rcr->busy);
rcr->busy = 0;
#endif
}
@@ -266,8 +237,10 @@ static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
{
register struct bm_rcr *rcr = &portal->rcr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(rcr->busy);
DPAA_ASSERT(rcr->pmode != bm_rcr_pvb);
+#endif
if (rcr->available == 1)
return NULL;
rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
@@ -282,8 +255,10 @@ static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
{
register struct bm_rcr *rcr = &portal->rcr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(rcr->busy);
DPAA_ASSERT(rcr->pmode == bm_rcr_pci);
+#endif
rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
RCR_INC(rcr);
rcr->available--;
@@ -298,7 +273,9 @@ static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
{
__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
+#endif
bm_cl_invalidate(RCR_PI);
bm_cl_touch_rw(RCR_PI);
}
@@ -307,8 +284,10 @@ static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
{
register struct bm_rcr *rcr = &portal->rcr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(rcr->busy);
DPAA_ASSERT(rcr->pmode == bm_rcr_pce);
+#endif
rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
RCR_INC(rcr);
rcr->available--;
@@ -324,8 +303,10 @@ static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
register struct bm_rcr *rcr = &portal->rcr;
struct bm_rcr_entry *rcursor;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(rcr->busy);
DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+#endif
lwsync();
rcursor = rcr->cursor;
rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
@@ -342,7 +323,9 @@ static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
register struct bm_rcr *rcr = &portal->rcr;
u8 diff, old_ci = rcr->ci;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(rcr->cmode == bm_rcr_cci);
+#endif
rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
rcr->available += diff;
@@ -353,7 +336,9 @@ static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
__maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+#endif
bm_cl_touch_ro(RCR_CI);
}
@@ -362,7 +347,9 @@ static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
register struct bm_rcr *rcr = &portal->rcr;
u8 diff, old_ci = rcr->ci;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+#endif
rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
bm_cl_invalidate(RCR_CI);
diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
@@ -420,8 +407,8 @@ static inline void bm_mc_finish(struct bm_portal *portal)
{
__maybe_unused register struct bm_mc *mc = &portal->mc;
- DPAA_ASSERT(mc->state == mc_idle);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_idle);
if (mc->state != mc_idle)
pr_crit("Losing incomplete MC command\n");
#endif
@@ -431,8 +418,8 @@ static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
register struct bm_mc *mc = &portal->mc;
- DPAA_ASSERT(mc->state == mc_idle);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_idle);
mc->state = mc_user;
#endif
dcbz_64(mc->cr);
@@ -443,8 +430,8 @@ static inline void bm_mc_abort(struct bm_portal *portal)
{
__maybe_unused register struct bm_mc *mc = &portal->mc;
- DPAA_ASSERT(mc->state == mc_user);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == mc_user);
mc->state = mc_idle;
#endif
}
@@ -454,7 +441,9 @@ static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
register struct bm_mc *mc = &portal->mc;
struct bm_mc_result *rr = mc->rr + mc->rridx;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(mc->state == mc_user);
+#endif
lwsync();
mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
dcbf(mc->cr);
@@ -469,7 +458,9 @@ static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
register struct bm_mc *mc = &portal->mc;
struct bm_mc_result *rr = mc->rr + mc->rridx;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(mc->state == mc_hw);
+#endif
/* The inactive response register's verb byte always returns zero until
* its command is submitted and completed. This includes the valid-bit,
* in case you were wondering.
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c
index 5c13a803..a1ef3921 100644
--- a/drivers/bus/dpaa/base/qbman/bman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_branch_prediction.h>
diff --git a/drivers/bus/dpaa/base/qbman/bman_priv.h b/drivers/bus/dpaa/base/qbman/bman_priv.h
index 07d9cec6..5a3e330d 100644
--- a/drivers/bus/dpaa/base/qbman/bman_priv.h
+++ b/drivers/bus/dpaa/base/qbman/bman_priv.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __BMAN_PRIV_H
diff --git a/drivers/bus/dpaa/base/qbman/dpaa_alloc.c b/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
index 35dba7f7..a05803c2 100644
--- a/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
+++ b/drivers/bus/dpaa/base/qbman/dpaa_alloc.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2009-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include "dpaa_sys.h"
diff --git a/drivers/bus/dpaa/base/qbman/dpaa_sys.c b/drivers/bus/dpaa/base/qbman/dpaa_sys.c
index 0017da52..9d6bfd40 100644
--- a/drivers/bus/dpaa/base/qbman/dpaa_sys.c
+++ b/drivers/bus/dpaa/base/qbman/dpaa_sys.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <process.h>
diff --git a/drivers/bus/dpaa/base/qbman/dpaa_sys.h b/drivers/bus/dpaa/base/qbman/dpaa_sys.h
index bee9fe57..034991ba 100644
--- a/drivers/bus/dpaa/base/qbman/dpaa_sys.h
+++ b/drivers/bus/dpaa/base/qbman/dpaa_sys.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DPAA_SYS_H
diff --git a/drivers/bus/dpaa/base/qbman/process.c b/drivers/bus/dpaa/base/qbman/process.c
index b8ec5398..2c23c98d 100644
--- a/drivers/bus/dpaa/base/qbman/process.c
+++ b/drivers/bus/dpaa/base/qbman/process.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2011-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <fcntl.h>
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index 87fec60d..2b97671b 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -1,45 +1,15 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include "qman.h"
#include <rte_branch_prediction.h>
+#include <rte_dpaa_bus.h>
+#include <rte_eventdev.h>
+#include <rte_byteorder.h>
/* Compilation constants */
#define DQRR_MAXFILL 15
@@ -416,7 +386,9 @@ static inline void qm_eqcr_finish(struct qm_portal *portal)
qm_cl_invalidate(EQCR_CI);
eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(!eqcr->busy);
+#endif
if (pi != EQCR_PTR2IDX(eqcr->cursor))
pr_crit("losing uncommitted EQCR entries\n");
if (ci != eqcr->ci)
@@ -505,7 +477,9 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
register struct qm_mr *mr = &portal->mr;
const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+#endif
/* when accessing 'verb', use __raw_readb() to ensure that compiler
* inlining doesn't try to optimise out "excess reads".
*/
@@ -532,7 +506,12 @@ struct qman_portal *qman_create_portal(
p = &portal->p;
- portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
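+	/*
+	 * On the LS1043A family the EQCR CI stashing threshold is fixed at
+	 * 3; other SoCs stash a single line only when QMan is rev 3.0+.
+	 */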
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+ portal->use_eqcr_ci_stashing = 3;
+ else
+ portal->use_eqcr_ci_stashing =
+ ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+
/*
* prep the low-level portal struct with the mapped addresses from the
* config, everything that follows depends on it and "config" is more
@@ -545,7 +524,7 @@ struct qman_portal *qman_create_portal(
* and stash with high-than-DQRR priority.
*/
if (qm_eqcr_init(p, qm_eqcr_pvb,
- portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+ portal->use_eqcr_ci_stashing, 1)) {
pr_err("Qman EQCR initialisation failed\n");
goto fail_eqcr;
}
@@ -644,11 +623,50 @@ fail_eqcr:
return NULL;
}
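+/* Pool of portals for threads that need a dedicated portal beyond the
+ * per-lcore affine one; slot ownership is tracked with atomic flags.
+ */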
+#define MAX_GLOBAL_PORTALS 8
+static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
+rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
+
+static struct qman_portal *
+qman_alloc_global_portal(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
+ if (rte_atomic16_test_and_set(&global_portals_used[i]))
+ return &global_portals[i];
+ }
+ pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);
+
+ return NULL;
+}
+
+static int
+qman_free_global_portal(struct qman_portal *portal)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
+ if (&global_portals[i] == portal) {
+ rte_atomic16_clear(&global_portals_used[i]);
+ return 0;
+ }
+ }
+ return -1;
+}
+
struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
- const struct qman_cgrs *cgrs)
+ const struct qman_cgrs *cgrs,
+ int alloc)
{
struct qman_portal *res;
- struct qman_portal *portal = get_affine_portal();
+ struct qman_portal *portal;
+
+ if (alloc)
+ portal = qman_alloc_global_portal();
+ else
+ portal = get_affine_portal();
+
/* A criteria for calling this function (from qman_driver.c) is that
* we're already affine to the cpu and won't schedule onto another cpu.
*/
@@ -698,13 +716,18 @@ void qman_destroy_portal(struct qman_portal *qm)
spin_lock_destroy(&qm->cgr_lock);
}
-const struct qm_portal_config *qman_destroy_affine_portal(void)
+const struct qm_portal_config *
+qman_destroy_affine_portal(struct qman_portal *qp)
{
/* We don't want to redirect if we're a slave, use "raw" */
- struct qman_portal *qm = get_affine_portal();
+ struct qman_portal *qm;
const struct qm_portal_config *pcfg;
int cpu;
+ if (qp == NULL)
+ qm = get_affine_portal();
+ else
+ qm = qp;
pcfg = qm->config;
cpu = pcfg->cpu;
@@ -713,6 +736,9 @@ const struct qm_portal_config *qman_destroy_affine_portal(void)
spin_lock(&affine_mask_lock);
CPU_CLR(cpu, &affine_mask);
spin_unlock(&affine_mask_lock);
+
+ qman_free_global_portal(qm);
+
return pcfg;
}
@@ -929,7 +955,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
do {
qm_dqrr_pvb_update(&p->p);
dq = qm_dqrr_current(&p->p);
- if (!dq)
+ if (unlikely(!dq))
break;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
/* If running on an LE system the fields of the
@@ -1025,6 +1051,137 @@ u16 qman_affine_channel(int cpu)
return affine_channels[cpu];
}
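+/* Burst-dequeue up to 'poll_limit' frames from this portal's DQRR into
+ * 'bufs'. Entries are gathered first and handed to the FQ's bulk pull
+ * callback, then every consumed entry is acknowledged with a single
+ * DCAP write.
+ */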
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+ void **bufs,
+ struct qman_portal *p)
+{
+ struct qm_portal *portal = &p->p;
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
+ struct qman_fq *fq[QM_DQRR_SIZE];
+ unsigned int limit = 0, rx_number = 0;
+ uint32_t consume = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ if (!dqrr->fill)
+ break;
+
+ dq[rx_number] = dqrr->cursor;
+ dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1);
+ /* Prefetch the next DQRR entry */
+ rte_prefetch0(dqrr->cursor);
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+		/* If running on an LE system the fields of the
+		 * dequeue entry must be byte-swapped. Because the
+		 * QMan HW ignores writes, the entry is copied into
+		 * a shadow and the index stored within the copy.
+		 */
+ shadow[rx_number] =
+ &p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])];
+ shadow[rx_number]->fd.opaque_addr =
+ dq[rx_number]->fd.opaque_addr;
+ shadow[rx_number]->fd.addr =
+ be40_to_cpu(dq[rx_number]->fd.addr);
+ shadow[rx_number]->fd.opaque =
+ be32_to_cpu(dq[rx_number]->fd.opaque);
+#else
+		shadow[rx_number] = dq[rx_number];
+#endif
+
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq[rx_number] = qman_fq_lookup_table[be32_to_cpu(
+ dq[rx_number]->contextB)];
+#else
+		fq[rx_number] = (void *)(uintptr_t)
+				be32_to_cpu(dq[rx_number]->contextB);
+#endif
+ fq[rx_number]->cb.dqrr_prepare(shadow[rx_number],
+ &bufs[rx_number]);
+
+ consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
+ rx_number++;
+ --dqrr->fill;
+ } while (++limit < poll_limit);
+
+ if (rx_number)
+ fq[0]->cb.dqrr_dpdk_pull_cb(fq, shadow, bufs, rx_number);
+
+	/* Consume all the DQRR entries together */
+ qm_out(DQRR_DCAP, (1 << 8) | consume);
+
+ return rx_number;
+}
+
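+/* Event-mode dequeue: turn up to 'poll_limit' DQRR entries into rte_event
+ * objects via each FQ's dqrr_dpdk_cb callback, consuming or parking each
+ * entry as the callback's result dictates.
+ */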
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+ void **bufs)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+ struct qman_portal *p = get_affine_portal();
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ struct qm_dqrr_entry *shadow;
+#endif
+ unsigned int rx_number = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+		/*
+		 * If running on an LE system the fields of the
+		 * dequeue entry must be byte-swapped. Because the
+		 * QMan HW ignores writes, the entry is copied into
+		 * a shadow and the byte-swapping done on the copy.
+		 */
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ *shadow = *dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->contextB = be32_to_cpu(shadow->contextB);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+
+ /* SDQCR: context_b points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(dq->contextB);
+#else
+ fq = (void *)(uintptr_t)dq->contextB;
+#endif
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
+ dq, &bufs[rx_number]);
+ rx_number++;
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit);
+
+ return limit;
+}
+
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
{
struct qman_portal *p = get_affine_portal();
@@ -1119,37 +1276,44 @@ void qman_start_dequeues(void)
qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
}
-void qman_static_dequeue_add(u32 pools)
+void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
{
- struct qman_portal *p = get_affine_portal();
+ struct qman_portal *p = qp ? qp : get_affine_portal();
pools &= p->config->pools;
p->sdqcr |= pools;
qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
}
-void qman_static_dequeue_del(u32 pools)
+void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
{
- struct qman_portal *p = get_affine_portal();
+ struct qman_portal *p = qp ? qp : get_affine_portal();
pools &= p->config->pools;
p->sdqcr &= ~pools;
qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
}
-u32 qman_static_dequeue_get(void)
+u32 qman_static_dequeue_get(struct qman_portal *qp)
{
- struct qman_portal *p = get_affine_portal();
+ struct qman_portal *p = qp ? qp : get_affine_portal();
return p->sdqcr;
}
-void qman_dca(struct qm_dqrr_entry *dq, int park_request)
+void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
{
struct qman_portal *p = get_affine_portal();
qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
}
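+/* As qman_dca(), but acknowledging by DQRR ring index rather than by
+ * entry pointer.
+ */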
+void qman_dca_index(u8 index, int park_request)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ qm_dqrr_cdc_consume_1(&p->p, index, park_request);
+}
+
/* Frame queue API */
static const char *mcr_result_str(u8 result)
{
@@ -1188,6 +1352,7 @@ int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
}
spin_lock_init(&fq->fqlock);
fq->fqid = fqid;
+ fq->fqid_le = cpu_to_be32(fqid);
fq->flags = flags;
fq->state = qman_fq_state_oos;
fq->cgr_groupid = 0;
@@ -1267,6 +1432,7 @@ void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
switch (fq->state) {
case qman_fq_state_parked:
DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
+ /* Fallthrough */
case qman_fq_state_oos:
if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
qman_release_fqid(fq->fqid);
@@ -1694,6 +1860,28 @@ int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
return 0;
}
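+/* Query the current frame count of an FQ with a QUERYFQ_NP management
+ * command issued on the affine portal.
+ */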
+int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt);
+ else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+ return -ERANGE;
+	else
+		return -EIO;
+ return 0;
+}
+
int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
{
struct qm_mc_command *mcc;
@@ -1974,7 +2162,7 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
}
int qman_enqueue_multi(struct qman_fq *fq,
- const struct qm_fd *fd,
+ const struct qm_fd *fd, u32 *flags,
int frames_to_send)
{
struct qman_portal *p = get_affine_portal();
@@ -1983,7 +2171,7 @@ int qman_enqueue_multi(struct qman_fq *fq,
register struct qm_eqcr *eqcr = &portal->eqcr;
struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
- u8 i, diff, old_ci, sent = 0;
+ u8 i = 0, diff, old_ci, sent = 0;
/* Update the available entries if no entry is free */
if (!eqcr->available) {
@@ -1997,7 +2185,7 @@ int qman_enqueue_multi(struct qman_fq *fq,
/* try to send as many frames as possible */
while (eqcr->available && frames_to_send--) {
- eq->fqid = cpu_to_be32(fq->fqid);
+ eq->fqid = fq->fqid_le;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
eq->tag = cpu_to_be32(fq->key);
#else
@@ -2007,6 +2195,76 @@ int qman_enqueue_multi(struct qman_fq *fq,
eq->fd.addr = cpu_to_be40(fd->addr);
eq->fd.status = cpu_to_be32(fd->status);
eq->fd.opaque = cpu_to_be32(fd->opaque);
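+		/* When requested for this frame, enable discrete consumption
+		 * acknowledgement (DCA); the DQRR index to acknowledge is
+		 * carried in the upper bits of the per-frame flags word.
+		 */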
+ if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
+ eq->dca = QM_EQCR_DCA_ENABLE |
+ ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
+ }
+ i++;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ eqcr->available--;
+ sent++;
+ fd++;
+ }
+ lwsync();
+
+	/* Write the verb bytes for all entries first; flushing the cache
+	 * lines back-to-back afterwards lets the flushes complete faster.
+	 */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ eq->__dont_write_directly__verb =
+ QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
+ prev_eq = eq;
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ if (unlikely((prev_eq + 1) != eq))
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+ }
+
+ /* We need to flush all the lines but without load/store operations
+ * between them
+ */
+ eq = eqcr->cursor;
+ for (i = 0; i < sent; i++) {
+ dcbf(eq);
+ eq = (void *)((unsigned long)(eq + 1) &
+ (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ }
+ /* Update cursor for the next call */
+ eqcr->cursor = eq;
+ return sent;
+}
+
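+/* Like qman_enqueue_multi(), but each frame may target a different FQ;
+ * fq[i] supplies the (pre-byteswapped) FQID for fd[i].
+ */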
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+ int frames_to_send)
+{
+ struct qman_portal *p = get_affine_portal();
+ struct qm_portal *portal = &p->p;
+
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+
+ u8 i, diff, old_ci, sent = 0;
+
+ /* Update the available entries if no entry is free */
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return 0;
+ }
+
+ /* try to send as many frames as possible */
+ while (eqcr->available && frames_to_send--) {
+ eq->fqid = fq[sent]->fqid_le;
+ eq->fd.opaque_addr = fd->opaque_addr;
+ eq->fd.addr = cpu_to_be40(fd->addr);
+ eq->fd.status = cpu_to_be32(fd->status);
+ eq->fd.opaque = cpu_to_be32(fd->opaque);
eq = (void *)((unsigned long)(eq + 1) &
(~(unsigned long)(QM_EQCR_SIZE << 6)));
diff --git a/drivers/bus/dpaa/base/qbman/qman.h b/drivers/bus/dpaa/base/qbman/qman.h
index 2c0f694c..4346d865 100644
--- a/drivers/bus/dpaa/base/qbman/qman.h
+++ b/drivers/bus/dpaa/base/qbman/qman.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
+ * Copyright 2017 NXP
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include "qman_priv.h"
@@ -187,7 +154,7 @@ struct qm_eqcr {
};
struct qm_dqrr {
- const struct qm_dqrr_entry *ring, *cursor;
+ struct qm_dqrr_entry *ring, *cursor;
u8 pi, ci, fill, ithresh, vbit;
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
enum qm_dqrr_dmode dmode;
@@ -267,7 +234,9 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
{
register struct qm_eqcr *eqcr = &portal->eqcr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(!eqcr->busy);
+#endif
if (!eqcr->available)
return NULL;
@@ -284,7 +253,9 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
register struct qm_eqcr *eqcr = &portal->eqcr;
u8 diff, old_ci;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(!eqcr->busy);
+#endif
if (!eqcr->available) {
old_ci = eqcr->ci;
eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
@@ -303,8 +274,8 @@ static inline void qm_eqcr_abort(struct qm_portal *portal)
{
__maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
- DPAA_ASSERT(eqcr->busy);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(eqcr->busy);
eqcr->busy = 0;
#endif
}
@@ -314,8 +285,10 @@ static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
{
register struct qm_eqcr *eqcr = &portal->eqcr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(eqcr->busy);
DPAA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
+#endif
if (eqcr->available == 1)
return NULL;
eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
@@ -336,8 +309,10 @@ static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
{
register struct qm_eqcr *eqcr = &portal->eqcr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
EQCR_COMMIT_CHECKS(eqcr);
DPAA_ASSERT(eqcr->pmode == qm_eqcr_pci);
+#endif
eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
EQCR_INC(eqcr);
eqcr->available--;
@@ -353,7 +328,9 @@ static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
{
__maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+#endif
qm_cl_invalidate(EQCR_PI);
qm_cl_touch_rw(EQCR_PI);
}
@@ -362,8 +339,10 @@ static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
{
register struct qm_eqcr *eqcr = &portal->eqcr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
EQCR_COMMIT_CHECKS(eqcr);
DPAA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+#endif
eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
EQCR_INC(eqcr);
eqcr->available--;
@@ -380,8 +359,10 @@ static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
register struct qm_eqcr *eqcr = &portal->eqcr;
struct qm_eqcr_entry *eqcursor;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
EQCR_COMMIT_CHECKS(eqcr);
DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+#endif
lwsync();
eqcursor = eqcr->cursor;
eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
@@ -460,7 +441,7 @@ static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
}
-static inline const struct qm_dqrr_entry *DQRR_INC(
+static inline struct qm_dqrr_entry *DQRR_INC(
const struct qm_dqrr_entry *e)
{
return DQRR_CARRYCLEAR(e + 1);
@@ -503,7 +484,9 @@ static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
register struct qm_dqrr *dqrr = &portal->dqrr;
u8 diff, old_pi = dqrr->pi;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->pmode == qm_dqrr_pci);
+#endif
dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
dqrr->fill += diff;
@@ -514,7 +497,9 @@ static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
{
__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+#endif
qm_cl_invalidate(DQRR_PI);
qm_cl_touch_ro(DQRR_PI);
}
@@ -524,7 +509,9 @@ static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
register struct qm_dqrr *dqrr = &portal->dqrr;
u8 diff, old_pi = dqrr->pi;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+#endif
dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
dqrr->fill += diff;
@@ -536,7 +523,9 @@ static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
register struct qm_dqrr *dqrr = &portal->dqrr;
const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#endif
/* when accessing 'verb', use __raw_readb() to ensure that compiler
* inlining doesn't try to optimise out "excess reads".
*/
@@ -552,7 +541,9 @@ static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
{
register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+#endif
dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
qm_out(DQRR_CI_CINH, dqrr->ci);
}
@@ -561,7 +552,9 @@ static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
{
register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+#endif
dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
qm_out(DQRR_CI_CINH, dqrr->ci);
}
@@ -570,7 +563,9 @@ static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
{
__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
qm_cl_invalidate(DQRR_CI);
qm_cl_touch_rw(DQRR_CI);
}
@@ -579,7 +574,9 @@ static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
{
register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
qm_cl_out(DQRR_CI, dqrr->ci);
}
@@ -588,7 +585,9 @@ static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
{
register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+#endif
dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
qm_cl_out(DQRR_CI, dqrr->ci);
}
@@ -598,7 +597,9 @@ static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
{
__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
DPAA_ASSERT(idx < QM_DQRR_SIZE);
qm_out(DQRR_DCAP, (0 << 8) | /* S */
((park ? 1 : 0) << 6) | /* PK */
@@ -612,7 +613,9 @@ static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
u8 idx = DQRR_PTR2IDX(dq);
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
DPAA_ASSERT(idx < QM_DQRR_SIZE);
qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
@@ -623,7 +626,9 @@ static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
{
__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
@@ -634,7 +639,9 @@ static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
{
__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
}
@@ -642,7 +649,9 @@ static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
{
__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
qm_cl_invalidate(DQRR_CI);
qm_cl_touch_ro(DQRR_CI);
}
@@ -651,7 +660,9 @@ static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
{
__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+#endif
return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
}
@@ -659,7 +670,9 @@ static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
{
register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
return dqrr->ci;
}
@@ -667,7 +680,9 @@ static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
{
__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
qm_out(DQRR_DCAP, (0 << 8) | /* S */
(1 << 6) | /* PK */
(idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
@@ -677,7 +692,9 @@ static inline void qm_dqrr_park_current(struct qm_portal *portal)
{
register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+#endif
qm_out(DQRR_DCAP, (0 << 8) | /* S */
(1 << 6) | /* PK */
DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
@@ -766,7 +783,9 @@ static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
register struct qm_mr *mr = &portal->mr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(mr->cmode == qm_mr_cci);
+#endif
mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
qm_out(MR_CI_CINH, mr->ci);
}
@@ -775,7 +794,9 @@ static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
register struct qm_mr *mr = &portal->mr;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(mr->cmode == qm_mr_cci);
+#endif
mr->ci = MR_PTR2IDX(mr->cursor);
qm_out(MR_CI_CINH, mr->ci);
}
@@ -806,8 +827,8 @@ static inline void qm_mc_finish(struct qm_portal *portal)
{
__maybe_unused register struct qm_mc *mc = &portal->mc;
- DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_idle);
if (mc->state != qman_mc_idle)
pr_crit("Losing incomplete MC command\n");
#endif
@@ -817,8 +838,8 @@ static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
register struct qm_mc *mc = &portal->mc;
- DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
+ DPAA_ASSERT(mc->state == qman_mc_idle);
mc->state = qman_mc_user;
#endif
dcbz_64(mc->cr);
@@ -830,7 +851,9 @@ static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
register struct qm_mc *mc = &portal->mc;
struct qm_mc_result *rr = mc->rr + mc->rridx;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(mc->state == qman_mc_user);
+#endif
lwsync();
mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
dcbf(mc->cr);
@@ -845,7 +868,9 @@ static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
register struct qm_mc *mc = &portal->mc;
struct qm_mc_result *rr = mc->rr + mc->rridx;
+#ifdef RTE_LIBRTE_DPAA_HWDEBUG
DPAA_ASSERT(mc->state == qman_mc_hw);
+#endif
/* The inactive response register's verb byte always returns zero until
* its command is submitted and completed. This includes the valid-bit,
* in case you were wondering.
diff --git a/drivers/bus/dpaa/base/qbman/qman_driver.c b/drivers/bus/dpaa/base/qbman/qman_driver.c
index 7a688967..7cfa8ee4 100644
--- a/drivers/bus/dpaa/base/qbman/qman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/qman_driver.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <fsl_usd.h>
@@ -57,8 +24,8 @@ void *qman_ccsr_map;
/* The qman clock frequency */
u32 qman_clk;
-static __thread int fd = -1;
-static __thread struct qm_portal_config pcfg;
+static __thread int qmfd = -1;
+static __thread struct qm_portal_config qpcfg;
static __thread struct dpaa_ioctl_portal_map map = {
.type = dpaa_portal_qman
};
@@ -77,16 +44,16 @@ static int fsl_qman_portal_init(uint32_t index, int is_shared)
error(0, ret, "pthread_getaffinity_np()");
return ret;
}
- pcfg.cpu = -1;
+ qpcfg.cpu = -1;
for (loop = 0; loop < CPU_SETSIZE; loop++)
if (CPU_ISSET(loop, &cpuset)) {
- if (pcfg.cpu != -1) {
+ if (qpcfg.cpu != -1) {
pr_err("Thread is not affine to 1 cpu\n");
return -EINVAL;
}
- pcfg.cpu = loop;
+ qpcfg.cpu = loop;
}
- if (pcfg.cpu == -1) {
+ if (qpcfg.cpu == -1) {
pr_err("Bug in getaffinity handling!\n");
return -EINVAL;
}
@@ -98,36 +65,36 @@ static int fsl_qman_portal_init(uint32_t index, int is_shared)
error(0, ret, "process_portal_map()");
return ret;
}
- pcfg.channel = map.channel;
- pcfg.pools = map.pools;
- pcfg.index = map.index;
+ qpcfg.channel = map.channel;
+ qpcfg.pools = map.pools;
+ qpcfg.index = map.index;
/* Make the portal's cache-[enabled|inhibited] regions */
- pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
- pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
+ qpcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
+ qpcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
- fd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
- if (fd == -1) {
+ qmfd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (qmfd == -1) {
pr_err("QMan irq init failed\n");
process_portal_unmap(&map.addr);
return -EBUSY;
}
- pcfg.is_shared = is_shared;
- pcfg.node = NULL;
- pcfg.irq = fd;
+ qpcfg.is_shared = is_shared;
+ qpcfg.node = NULL;
+ qpcfg.irq = qmfd;
- portal = qman_create_affine_portal(&pcfg, NULL);
+ portal = qman_create_affine_portal(&qpcfg, NULL, 0);
if (!portal) {
pr_err("Qman portal initialisation failed (%d)\n",
- pcfg.cpu);
+ qpcfg.cpu);
process_portal_unmap(&map.addr);
return -EBUSY;
}
irq_map.type = dpaa_portal_qman;
irq_map.portal_cinh = map.addr.cinh;
- process_portal_irq_map(fd, &irq_map);
+ process_portal_irq_map(qmfd, &irq_map);
return 0;
}
@@ -136,10 +103,10 @@ static int fsl_qman_portal_finish(void)
__maybe_unused const struct qm_portal_config *cfg;
int ret;
- process_portal_irq_unmap(fd);
+ process_portal_irq_unmap(qmfd);
- cfg = qman_destroy_affine_portal();
- DPAA_BUG_ON(cfg != &pcfg);
+ cfg = qman_destroy_affine_portal(NULL);
+ DPAA_BUG_ON(cfg != &qpcfg);
ret = process_portal_unmap(&map.addr);
if (ret)
error(0, ret, "process_portal_unmap()");
@@ -161,14 +128,119 @@ int qman_thread_finish(void)
void qman_thread_irq(void)
{
- qbman_invoke_irq(pcfg.irq);
+ qbman_invoke_irq(qpcfg.irq);
/* Now we need to uninhibit interrupts. This is the only code outside
* the regular portal driver that manipulates any portal register, so
* rather than breaking that encapsulation I am simply hard-coding the
* offset to the inhibit register here.
*/
- out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+ out_be32(qpcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
+}
+
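+/* Create and map an extra QMan portal for the calling thread, in addition
+ * to its affine portal; used for FQ-specific (e.g. eventdev) portals.
+ */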
+struct qman_portal *fsl_qman_portal_create(void)
+{
+ cpu_set_t cpuset;
+ struct qman_portal *res;
+
+ struct qm_portal_config *q_pcfg;
+ int loop, ret;
+ struct dpaa_ioctl_irq_map irq_map;
+ struct dpaa_ioctl_portal_map q_map = {0};
+ int q_fd;
+
+ q_pcfg = kzalloc((sizeof(struct qm_portal_config)), 0);
+ if (!q_pcfg) {
+ error(0, -1, "q_pcfg kzalloc failed");
+ return NULL;
+ }
+
+ /* Verify the thread's cpu-affinity */
+ ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+ &cpuset);
+	if (ret) {
+		error(0, ret, "pthread_getaffinity_np()");
+		kfree(q_pcfg);
+		return NULL;
+	}
+
+	q_pcfg->cpu = -1;
+	for (loop = 0; loop < CPU_SETSIZE; loop++)
+		if (CPU_ISSET(loop, &cpuset)) {
+			if (q_pcfg->cpu != -1) {
+				pr_err("Thread is not affine to 1 cpu\n");
+				kfree(q_pcfg);
+				return NULL;
+			}
+			q_pcfg->cpu = loop;
+		}
+	if (q_pcfg->cpu == -1) {
+		pr_err("Bug in getaffinity handling!\n");
+		kfree(q_pcfg);
+		return NULL;
+	}
+
+ /* Allocate and map a qman portal */
+ q_map.type = dpaa_portal_qman;
+ q_map.index = QBMAN_ANY_PORTAL_IDX;
+ ret = process_portal_map(&q_map);
+	if (ret) {
+		error(0, ret, "process_portal_map()");
+		kfree(q_pcfg);
+		return NULL;
+	}
+ q_pcfg->channel = q_map.channel;
+ q_pcfg->pools = q_map.pools;
+ q_pcfg->index = q_map.index;
+
+ /* Make the portal's cache-[enabled|inhibited] regions */
+ q_pcfg->addr_virt[DPAA_PORTAL_CE] = q_map.addr.cena;
+ q_pcfg->addr_virt[DPAA_PORTAL_CI] = q_map.addr.cinh;
+
+ q_fd = open(QMAN_PORTAL_IRQ_PATH, O_RDONLY);
+ if (q_fd == -1) {
+ pr_err("QMan irq init failed\n");
+ goto err1;
+ }
+
+ q_pcfg->irq = q_fd;
+
+ res = qman_create_affine_portal(q_pcfg, NULL, true);
+ if (!res) {
+ pr_err("Qman portal initialisation failed (%d)\n",
+ q_pcfg->cpu);
+ goto err2;
+ }
+
+ irq_map.type = dpaa_portal_qman;
+ irq_map.portal_cinh = q_map.addr.cinh;
+ process_portal_irq_map(q_fd, &irq_map);
+
+ return res;
+err2:
+ close(q_fd);
+err1:
+	process_portal_unmap(&q_map.addr);
+	kfree(q_pcfg);
+	return NULL;
+}
+
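+/* Tear down a portal created by fsl_qman_portal_create(): unmap its IRQ,
+ * release the portal mapping and free the saved configuration.
+ */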
+int fsl_qman_portal_destroy(struct qman_portal *qp)
+{
+ const struct qm_portal_config *cfg;
+ struct dpaa_portal_map addr;
+ int ret;
+
+ cfg = qman_destroy_affine_portal(qp);
+ kfree(qp);
+
+ process_portal_irq_unmap(cfg->irq);
+
+ addr.cena = cfg->addr_virt[DPAA_PORTAL_CE];
+ addr.cinh = cfg->addr_virt[DPAA_PORTAL_CI];
+
+ ret = process_portal_unmap(&addr);
+ if (ret)
+ pr_err("process_portal_unmap() (%d)\n", ret);
+
+ kfree((void *)cfg);
+
+ return ret;
}
int qman_global_init(void)
diff --git a/drivers/bus/dpaa/base/qbman/qman_priv.h b/drivers/bus/dpaa/base/qbman/qman_priv.h
index 3e1d7f92..9e4471e6 100644
--- a/drivers/bus/dpaa/base/qbman/qman_priv.h
+++ b/drivers/bus/dpaa/base/qbman/qman_priv.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __QMAN_PRIV_H
@@ -179,8 +146,10 @@ int qm_get_wpm(int *wpm);
struct qman_portal *qman_create_affine_portal(
const struct qm_portal_config *config,
- const struct qman_cgrs *cgrs);
-const struct qm_portal_config *qman_destroy_affine_portal(void);
+ const struct qman_cgrs *cgrs,
+ int alloc);
+const struct qm_portal_config *
+qman_destroy_affine_portal(struct qman_portal *q);
struct qm_portal_config *qm_get_unused_portal(void);
struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 1cc8c893..f2bb3b15 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* System headers */
#include <stdio.h>
@@ -53,10 +27,11 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_bus.h>
+#include <rte_mbuf_pool_ops.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
@@ -70,6 +45,7 @@
int dpaa_logtype_bus;
int dpaa_logtype_mempool;
int dpaa_logtype_pmd;
+int dpaa_logtype_eventdev;
struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;
@@ -77,18 +53,63 @@ struct netcfg_info *dpaa_netcfg;
/* define a variable to hold the portal_key, once created.*/
pthread_key_t dpaa_portal_key;
-RTE_DEFINE_PER_LCORE(bool, _dpaa_io);
+unsigned int dpaa_svr_family;
-static inline void
-dpaa_add_to_device_list(struct rte_dpaa_device *dev)
+RTE_DEFINE_PER_LCORE(bool, dpaa_io);
+RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
+
+static int
+compare_dpaa_devices(struct rte_dpaa_device *dev1,
+ struct rte_dpaa_device *dev2)
{
- TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, dev, next);
+ int comp = 0;
+
+	/* Segregating ETH from SEC devices */
+ if (dev1->device_type > dev2->device_type)
+ comp = 1;
+ else if (dev1->device_type < dev2->device_type)
+ comp = -1;
+ else
+ comp = 0;
+
+ if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
+ return comp;
+
+ if (dev1->id.fman_id > dev2->id.fman_id) {
+ comp = 1;
+ } else if (dev1->id.fman_id < dev2->id.fman_id) {
+ comp = -1;
+ } else {
+ /* FMAN ids match, check for mac_id */
+ if (dev1->id.mac_id > dev2->id.mac_id)
+ comp = 1;
+ else if (dev1->id.mac_id < dev2->id.mac_id)
+ comp = -1;
+ else
+ comp = 0;
+ }
+
+ return comp;
}
static inline void
-dpaa_remove_from_device_list(struct rte_dpaa_device *dev)
+dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
{
- TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, dev, next);
+ int comp, inserted = 0;
+ struct rte_dpaa_device *dev = NULL;
+ struct rte_dpaa_device *tdev = NULL;
+
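+	/* Walk the list and insert before the first device that sorts after
+	 * the new one, keeping the list ordered by type, FMAN id and MAC id.
+	 */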
+ TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+ comp = compare_dpaa_devices(newdev, dev);
+ if (comp < 0) {
+ TAILQ_INSERT_BEFORE(dev, newdev, next);
+ inserted = 1;
+ break;
+ }
+ }
+
+ if (!inserted)
+ TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
}
/*
@@ -204,9 +225,7 @@ dpaa_clean_device_list(void)
}
}
-/** XXX move this function into a separate file */
-static int
-_dpaa_portal_init(void *arg)
+int rte_dpaa_portal_init(void *arg)
{
cpu_set_t cpuset;
pthread_t id;
@@ -277,26 +296,41 @@ _dpaa_portal_init(void *arg)
return ret;
}
- RTE_PER_LCORE(_dpaa_io) = true;
+ RTE_PER_LCORE(dpaa_io) = true;
DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
return 0;
}
-/*
- * rte_dpaa_portal_init - Wrapper over _dpaa_portal_init with thread level check
- * XXX Complete this
- */
int
-rte_dpaa_portal_init(void *arg)
+rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
{
- if (unlikely(!RTE_PER_LCORE(_dpaa_io)))
- return _dpaa_portal_init(arg);
+	/* Affine a dedicated portal to this FQ's channel */
+ u32 sdqcr;
+ struct qman_portal *qp;
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io)))
+ rte_dpaa_portal_init(arg);
+
+ /* Initialise qman specific portals */
+ qp = fsl_qman_portal_create();
+ if (!qp) {
+ DPAA_BUS_LOG(ERR, "Unable to alloc fq portal");
+ return -1;
+ }
+ fq->qp = qp;
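+	/* Subscribe the new portal to the FQ's pool channel via the static
+	 * dequeue command register (SDQCR).
+	 */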
+ sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
+ qman_static_dequeue_add(sdqcr, qp);
return 0;
}
+int rte_dpaa_portal_fq_close(struct qman_fq *fq)
+{
+ return fsl_qman_portal_destroy(fq->qp);
+}
+
void
dpaa_portal_finish(void *arg)
{
@@ -315,7 +349,7 @@ dpaa_portal_finish(void *arg)
rte_free(dpaa_io_portal);
dpaa_io_portal = NULL;
- RTE_PER_LCORE(_dpaa_io) = false;
+ RTE_PER_LCORE(dpaa_io) = false;
}
#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
@@ -443,6 +477,8 @@ rte_dpaa_bus_probe(void)
int ret = -1;
struct rte_dpaa_device *dev;
struct rte_dpaa_driver *drv;
+ FILE *svr_file = NULL;
+ unsigned int svr_ver;
BUS_INIT_FUNC_TRACE();
@@ -459,9 +495,24 @@ rte_dpaa_bus_probe(void)
ret = drv->probe(drv, dev);
if (ret)
 			DPAA_BUS_ERR("Unable to probe");
+
break;
}
}
+
+ /* Register DPAA mempool ops only if any DPAA device has
+ * been detected.
+ */
+ if (!TAILQ_EMPTY(&rte_dpaa_bus.device_list))
+ rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);
+
+ svr_file = fopen(DPAA_SOC_ID_FILE, "r");
+ if (svr_file) {
+ if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
+ dpaa_svr_family = svr_ver & SVR_MASK;
+ fclose(svr_file);
+ }
+
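
The SVR probe reads a single `svr:<hex>` token from sysfs and keeps only the family bits. A standalone sketch of the same parse, reusing the path and mask definitions added to rte_dpaa_bus.h later in this patch:

```c
#include <stdio.h>

#define DPAA_SOC_ID_FILE "/sys/devices/soc0/soc_id"
#define SVR_MASK         0xffff0000

int main(void)
{
	FILE *svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	unsigned int svr_ver, family = 0;

	if (svr_file) {
		/* fscanf() returns the number of items converted;
		 * > 0 means the hex SVR value was actually read. */
		if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
			family = svr_ver & SVR_MASK;
		fclose(svr_file);
	}
	printf("SVR family: 0x%08x\n", family);
	return 0;
}
```
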
return 0;
}
@@ -490,6 +541,10 @@ rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
static enum rte_iova_mode
rte_dpaa_get_iommu_class(void)
{
+ if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
+ (access(DPAA_DEV_PATH2, F_OK) != 0)) {
+ return RTE_IOVA_DC;
+ }
return RTE_IOVA_PA;
}
@@ -522,4 +577,8 @@ dpaa_init_log(void)
dpaa_logtype_pmd = rte_log_register("pmd.dpaa");
if (dpaa_logtype_pmd >= 0)
rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);
+
+ dpaa_logtype_eventdev = rte_log_register("eventdev.dpaa");
+ if (dpaa_logtype_eventdev >= 0)
+ rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE);
}
diff --git a/drivers/bus/dpaa/include/compat.h b/drivers/bus/dpaa/include/compat.h
index 42733aeb..53707bb4 100644
--- a/drivers/bus/dpaa/include/compat.h
+++ b/drivers/bus/dpaa/include/compat.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2011 Freescale Semiconductor, Inc.
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __COMPAT_H
diff --git a/drivers/bus/dpaa/include/dpaa_bits.h b/drivers/bus/dpaa/include/dpaa_bits.h
index 71f2d809..9bc14d0c 100644
--- a/drivers/bus/dpaa/include/dpaa_bits.h
+++ b/drivers/bus/dpaa/include/dpaa_bits.h
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DPAA_BITS_H
diff --git a/drivers/bus/dpaa/include/dpaa_list.h b/drivers/bus/dpaa/include/dpaa_list.h
index 871e6121..e9457598 100644
--- a/drivers/bus/dpaa/include/dpaa_list.h
+++ b/drivers/bus/dpaa/include/dpaa_list.h
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DPAA_LIST_H
diff --git a/drivers/bus/dpaa/include/dpaa_rbtree.h b/drivers/bus/dpaa/include/dpaa_rbtree.h
index f8c9b593..6c237e70 100644
--- a/drivers/bus/dpaa/include/dpaa_rbtree.h
+++ b/drivers/bus/dpaa/include/dpaa_rbtree.h
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DPAA_RBTREE_H
diff --git a/drivers/bus/dpaa/include/fman.h b/drivers/bus/dpaa/include/fman.h
index 9890e09c..15bf73a4 100644
--- a/drivers/bus/dpaa/include/fman.h
+++ b/drivers/bus/dpaa/include/fman.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2010-2012 Freescale Semiconductor, Inc.
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FMAN_H
@@ -44,7 +11,7 @@
#include <stdbool.h>
#include <net/if.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <compat.h>
diff --git a/drivers/bus/dpaa/include/fsl_bman.h b/drivers/bus/dpaa/include/fsl_bman.h
index 383106b1..0c74aba4 100644
--- a/drivers/bus/dpaa/include/fsl_bman.h
+++ b/drivers/bus/dpaa/include/fsl_bman.h
@@ -1,40 +1,7 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_BMAN_H
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index 95aee67a..c0ef1bff 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -1,40 +1,7 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
- * BSD LICENSE
+ * Copyright 2017 NXP
*
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_FMAN_H
diff --git a/drivers/bus/dpaa/include/fsl_fman_crc64.h b/drivers/bus/dpaa/include/fsl_fman_crc64.h
index af5803f3..bf162f3a 100644
--- a/drivers/bus/dpaa/include/fsl_fman_crc64.h
+++ b/drivers/bus/dpaa/include/fsl_fman_crc64.h
@@ -1,40 +1,7 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2011 Freescale Semiconductor, Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_FMAN_CRC64_H
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index eedfd7ea..e9793f30 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1,40 +1,7 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_QMAN_H
@@ -45,6 +12,7 @@ extern "C" {
#endif
#include <dpaa_rbtree.h>
+#include <rte_eventdev.h>
/* FQ lookups (turn this on for 64bit user-space) */
#if (__WORDSIZE == 64)
@@ -1157,6 +1125,20 @@ typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
struct qman_fq *fq,
const struct qm_dqrr_entry *dqrr);
+typedef enum qman_cb_dqrr_result (*qman_dpdk_cb_dqrr)(void *event,
+ struct qman_portal *qm,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bd);
+
+/* This callback type is used when handling buffers in DPDK pull mode */
+typedef void (*qman_dpdk_pull_cb_dqrr)(struct qman_fq **fq,
+ struct qm_dqrr_entry **dqrr,
+ void **bufs,
+ int num_bufs);
+
+typedef void (*qman_dpdk_cb_prepare)(struct qm_dqrr_entry *dq, void **bufs);
+
/*
* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
* are always consumed after the callback returns.
@@ -1215,7 +1197,12 @@ enum qman_fq_state {
*/
struct qman_fq_cb {
- qman_cb_dqrr dqrr; /* for dequeued frames */
+ union { /* for dequeued frames */
+ qman_dpdk_cb_dqrr dqrr_dpdk_cb;
+ qman_dpdk_pull_cb_dqrr dqrr_dpdk_pull_cb;
+ qman_cb_dqrr dqrr;
+ };
+ qman_dpdk_cb_prepare dqrr_prepare;
qman_cb_mr ern; /* for s/w ERNs */
qman_cb_mr fqs; /* frame-queue state changes*/
};
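
Since the dequeue callback is now a union, a frame queue installs exactly one of the three entry points. A hedged sketch of wiring up the DPDK-mode callback; the handler body is a hypothetical placeholder:

```c
/* Hypothetical DPDK-mode dequeue handler matching qman_dpdk_cb_dqrr. */
static enum qman_cb_dqrr_result
my_dqrr_cb(void *event, struct qman_portal *qm, struct qman_fq *fq,
	   const struct qm_dqrr_entry *dqrr, void **bd)
{
	(void)event; (void)qm; (void)fq; (void)dqrr; (void)bd;
	/* ... translate dqrr into an rte_event / mbuf here ... */
	return qman_cb_dqrr_consume;
}

static void wire_up_fq(struct qman_fq *fq)
{
	/* Only one union member may be active at a time; the classic
	 * cb.dqrr and the two DPDK variants share the same storage. */
	fq->cb.dqrr_dpdk_cb = my_dqrr_cb;
	fq->cb.dqrr_prepare = NULL; /* optional prepare hook, unused here */
}
```
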
@@ -1223,19 +1210,25 @@ struct qman_fq_cb {
struct qman_fq {
/* Caller of qman_create_fq() provides these demux callbacks */
struct qman_fq_cb cb;
- /*
- * These are internal to the driver, don't touch. In particular, they
- * may change, be removed, or extended (so you shouldn't rely on
- * sizeof(qman_fq) being a constant).
- */
- spinlock_t fqlock;
- u32 fqid;
+
+ u32 fqid_le;
+ u16 ch_id;
+ u8 cgr_groupid;
+ u8 is_static;
+
/* DPDK Interface */
void *dpaa_intf;
+ struct rte_event ev;
+ /* affined portal in case of static queue */
+ struct qman_portal *qp;
+
volatile unsigned long flags;
+
enum qman_fq_state state;
- int cgr_groupid;
+ u32 fqid;
+ spinlock_t fqlock;
+
struct rb_node node;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
u32 key;
@@ -1317,6 +1310,9 @@ struct qman_cgr {
*/
int qman_get_portal_index(void);
+u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
+ void **bufs);
+
/**
 * qman_affine_channel - return the channel ID of a portal
* @cpu: the cpu whose affine portal is the subject of the query
@@ -1327,6 +1323,9 @@ int qman_get_portal_index(void);
*/
u16 qman_affine_channel(int cpu);
+unsigned int qman_portal_poll_rx(unsigned int poll_limit,
+ void **bufs, struct qman_portal *q);
+
/**
* qman_set_vdq - Issue a volatile dequeue command
* @fq: Frame Queue on which the volatile dequeue command is issued
@@ -1414,7 +1413,7 @@ void qman_start_dequeues(void);
* (SDQCR). The requested pools are limited to those the portal has dequeue
* access to.
*/
-void qman_static_dequeue_add(u32 pools);
+void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
/**
* qman_static_dequeue_del - Remove pool channels from the portal SDQCR
@@ -1424,7 +1423,7 @@ void qman_static_dequeue_add(u32 pools);
* register (SDQCR). The requested pools are limited to those the portal has
* dequeue access to.
*/
-void qman_static_dequeue_del(u32 pools);
+void qman_static_dequeue_del(u32 pools, struct qman_portal *qp);
/**
* qman_static_dequeue_get - return the portal's current SDQCR
@@ -1433,7 +1432,7 @@ void qman_static_dequeue_del(u32 pools);
* entire register is returned, so if only the currently-enabled pool channels
* are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
*/
-u32 qman_static_dequeue_get(void);
+u32 qman_static_dequeue_get(struct qman_portal *qp);
/**
* qman_dca - Perform a Discrete Consumption Acknowledgment
@@ -1447,7 +1446,21 @@ u32 qman_static_dequeue_get(void);
* function must be called from the same CPU as that which processed the DQRR
* entry in the first place.
*/
-void qman_dca(struct qm_dqrr_entry *dq, int park_request);
+void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
+
+/**
+ * qman_dca_index - Perform a Discrete Consumption Acknowledgment
+ * @index: the DQRR index to be consumed
+ * @park_request: indicates whether the held-active frame queue should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+void qman_dca_index(u8 index, int park_request);
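
A brief hedged sketch of the deferred-acknowledgment flow this index-based variant enables, assuming the caller stashed the DQRR index when its callback returned qman_cb_dqrr_defer (u8 is the driver's own typedef):

```c
/* Hypothetical: acknowledge a deferred DQRR entry by its saved index.
 * Must run on the same lcore that processed the entry. */
static void ack_deferred_entry(u8 saved_index)
{
	qman_dca_index(saved_index, 0 /* do not park the held-active FQ */);
}
```
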
/**
* qman_eqcr_is_empty - Determine if portal's EQCR is empty
@@ -1644,6 +1657,13 @@ int qman_query_fq_has_pkts(struct qman_fq *fq);
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
/**
+ * qman_query_fq_frm_cnt - Queries the frame count of a frame queue
+ * @fq: the frame queue object to be queried
+ * @frm_cnt: number of frames in the queue
+ */
+int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
+
+/**
* qman_query_wq - Queries work queue lengths
* @query_dedicated: If non-zero, query length of WQs in the channel dedicated
* to this software portal. Otherwise, query length of WQs in a
@@ -1708,9 +1728,22 @@ int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
*/
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
-int qman_enqueue_multi(struct qman_fq *fq,
- const struct qm_fd *fd,
- int frames_to_send);
+int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
+ int frames_to_send);
+
+/**
+ * qman_enqueue_multi_fq - Enqueue multiple frames to their respective frame
+ * queues.
+ * @fq[]: Array of frame queue objects to enqueue to
+ * @fd: pointer to first descriptor of frame to be enqueued
+ * @frames_to_send: number of frames to be sent.
+ *
+ * This API is similar to qman_enqueue_multi(), but each frame descriptor
+ * in @fd is enqueued to the corresponding frame queue in @fq[].
+ */
+int
+qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
+ int frames_to_send);
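
A sketch of how the two multi-enqueue variants differ; the burst size is illustrative, and the return-value semantics (number of frames actually enqueued) are assumed to match qman_enqueue_multi():

```c
#define BURST 8

/* Hypothetical burst transmit: one descriptor per frame queue. */
static int tx_burst(struct qman_fq *fqs[BURST], struct qm_fd fds[BURST])
{
	/* Single-FQ form: all BURST descriptors would go to fqs[0];
	 * passing NULL for the new flags array is an assumption. */
	/* qman_enqueue_multi(fqs[0], fds, NULL, BURST); */

	/* Multi-FQ form: fds[i] is enqueued to fqs[i]. */
	return qman_enqueue_multi_fq(fqs, fds, BURST);
}
```
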
typedef int (*qman_cb_precommit) (void *arg);
@@ -1993,8 +2026,8 @@ static inline int qman_poll_fq_for_init(struct qman_fq *fq)
}
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define cpu_to_hw_sg(x) (x)
-#define hw_sg_to_cpu(x) (x)
+#define cpu_to_hw_sg(x)
+#define hw_sg_to_cpu(x)
#else
#define cpu_to_hw_sg(x) __cpu_to_hw_sg(x)
#define hw_sg_to_cpu(x) __hw_sg_to_cpu(x)
diff --git a/drivers/bus/dpaa/include/fsl_usd.h b/drivers/bus/dpaa/include/fsl_usd.h
index a3243aff..e1836175 100644
--- a/drivers/bus/dpaa/include/fsl_usd.h
+++ b/drivers/bus/dpaa/include/fsl_usd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2010-2011 Freescale Semiconductor, Inc.
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_USD_H
@@ -100,6 +67,10 @@ void bman_thread_irq(void);
int qman_global_init(void);
int bman_global_init(void);
+/* Direct portal create and destroy */
+struct qman_portal *fsl_qman_portal_create(void);
+int fsl_qman_portal_destroy(struct qman_portal *qp);
+
#ifdef __cplusplus
}
#endif
diff --git a/drivers/bus/dpaa/include/netcfg.h b/drivers/bus/dpaa/include/netcfg.h
index b77a6787..7818de68 100644
--- a/drivers/bus/dpaa/include/netcfg.h
+++ b/drivers/bus/dpaa/include/netcfg.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2010-2012 Freescale Semiconductor, Inc.
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __NETCFG_H
diff --git a/drivers/bus/dpaa/include/of.h b/drivers/bus/dpaa/include/of.h
index 2984b1e1..151be5a3 100644
--- a/drivers/bus/dpaa/include/of.h
+++ b/drivers/bus/dpaa/include/of.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2010-2016 Freescale Semiconductor, Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __OF_H
diff --git a/drivers/bus/dpaa/include/process.h b/drivers/bus/dpaa/include/process.h
index 989ddcd5..d9ec94ee 100644
--- a/drivers/bus/dpaa/include/process.h
+++ b/drivers/bus/dpaa/include/process.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2010-2011 Freescale Semiconductor, Inc.
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PROCESS_H
@@ -72,6 +39,11 @@ enum dpaa_portal_type {
dpaa_portal_bman,
};
+struct dpaa_portal_map {
+ void *cinh;
+ void *cena;
+};
+
struct dpaa_ioctl_portal_map {
/* Input parameter, is a qman or bman portal required. */
enum dpaa_portal_type type;
@@ -83,10 +55,8 @@ struct dpaa_ioctl_portal_map {
/* Return value if the map succeeds, this gives the mapped
* cache-inhibited (cinh) and cache-enabled (cena) addresses.
*/
- struct dpaa_portal_map {
- void *cinh;
- void *cena;
- } addr;
+ struct dpaa_portal_map addr;
+
/* Qman-specific return values */
u16 channel;
uint32_t pools;
diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map
index fb9d5323..8d902854 100644
--- a/drivers/bus/dpaa/rte_bus_dpaa_version.map
+++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -64,3 +64,31 @@ DPDK_17.11 {
local: *;
};
+
+DPDK_18.02 {
+ global:
+
+ dpaa_logtype_eventdev;
+ dpaa_svr_family;
+ per_lcore_dpaa_io;
+ per_lcore_held_bufs;
+ qm_channel_pool1;
+ qman_alloc_cgrid_range;
+ qman_alloc_pool_range;
+ qman_create_cgr;
+ qman_dca_index;
+ qman_delete_cgr;
+ qman_enqueue_multi_fq;
+ qman_modify_cgr;
+ qman_oos_fq;
+ qman_portal_dequeue;
+ qman_portal_poll_rx;
+ qman_query_fq_frm_cnt;
+ qman_release_cgrid_range;
+ qman_retire_fq;
+ qman_static_dequeue_add;
+ rte_dpaa_portal_fq_close;
+ rte_dpaa_portal_fq_init;
+
+ local: *;
+} DPDK_17.11;
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
index eafc944d..718701b1 100644
--- a/drivers/bus/dpaa/rte_dpaa_bus.h
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTE_DPAA_BUS_H__
#define __RTE_DPAA_BUS_H__
@@ -43,9 +17,24 @@
#define FSL_DPAA_BUS_NAME "FSL_DPAA_BUS"
+#define DPAA_MEMPOOL_OPS_NAME "dpaa"
+
#define DEV_TO_DPAA_DEVICE(ptr) \
container_of(ptr, struct rte_dpaa_device, device)
+/* DPAA SoC identifier; if this file is not available, the board can be
+ * assumed to be non-DPAA. A single slot is currently supported.
+ */
+#define DPAA_SOC_ID_FILE "/sys/devices/soc0/soc_id"
+
+#define SVR_LS1043A_FAMILY 0x87920000
+#define SVR_LS1046A_FAMILY 0x87070000
+#define SVR_MASK 0xffff0000
+
+extern unsigned int dpaa_svr_family;
+
+extern RTE_DEFINE_PER_LCORE(bool, dpaa_io);
+
struct rte_dpaa_device;
struct rte_dpaa_driver;
@@ -113,10 +102,10 @@ static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
int i;
for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr != NULL; i++) {
- if (paddr >= memseg[i].phys_addr && paddr <
- memseg[i].phys_addr + memseg[i].len)
+ if (paddr >= memseg[i].iova && paddr <
+ memseg[i].iova + memseg[i].len)
return (uint8_t *)(memseg[i].addr) +
- (paddr - memseg[i].phys_addr);
+ (paddr - memseg[i].iova);
}
return NULL;
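
The translation now keys off the memseg IOVA rather than the removed phys_addr field; callers are unchanged. A hedged usage sketch:

```c
/* Hypothetical: convert the IOVA carried in a hardware frame
 * descriptor back to a virtual address before touching the buffer. */
static void *fd_to_vaddr(phys_addr_t fd_addr)
{
	void *vaddr = rte_dpaa_mem_ptov(fd_addr);

	if (vaddr == NULL)
		return NULL; /* not backed by any registered memseg */
	return vaddr;
}
```
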
@@ -151,6 +140,10 @@ void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);
*/
int rte_dpaa_portal_init(void *arg);
+int rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq);
+
+int rte_dpaa_portal_fq_close(struct qman_fq *fq);
+
/**
* Cleanup a DPAA Portal
*/
@@ -166,6 +159,20 @@ static void dpaainitfn_ ##nm(void) \
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+/* Create storage for dqrr entries per lcore */
+#define DPAA_PORTAL_DEQUEUE_DEPTH 16
+struct dpaa_portal_dqrr {
+ void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
+ uint64_t dqrr_held;
+ uint8_t dqrr_size;
+};
+
+RTE_DECLARE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
+
+#define DPAA_PER_LCORE_DQRR_SIZE RTE_PER_LCORE(held_bufs).dqrr_size
+#define DPAA_PER_LCORE_DQRR_HELD RTE_PER_LCORE(held_bufs).dqrr_held
+#define DPAA_PER_LCORE_DQRR_MBUF(i) RTE_PER_LCORE(held_bufs).mbuf[i]
+
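+
+/* A sketch of how a consumer might drain the per-lcore DQRR storage;
+ * treating dqrr_held as a bitmask of outstanding indices is an
+ * assumption based on its u64 type and the macro names:
+ *
+ *	static void drain_held_bufs(void)
+ *	{
+ *		unsigned int i;
+ *
+ *		for (i = 0; i < DPAA_PORTAL_DEQUEUE_DEPTH; i++) {
+ *			if (!(DPAA_PER_LCORE_DQRR_HELD & (1ULL << i)))
+ *				continue;
+ *			// ... process DPAA_PER_LCORE_DQRR_MBUF(i) ...
+ *			DPAA_PER_LCORE_DQRR_HELD &= ~(1ULL << i);
+ *			DPAA_PER_LCORE_DQRR_SIZE--;
+ *		}
+ *	}
+ */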
#ifdef __cplusplus
}
#endif
diff --git a/drivers/bus/dpaa/rte_dpaa_logs.h b/drivers/bus/dpaa/rte_dpaa_logs.h
index 037c96b0..0fd70bbf 100644
--- a/drivers/bus/dpaa/rte_dpaa_logs.h
+++ b/drivers/bus/dpaa/rte_dpaa_logs.h
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DPAA_LOGS_H_
@@ -38,6 +12,7 @@
extern int dpaa_logtype_bus;
extern int dpaa_logtype_mempool;
extern int dpaa_logtype_pmd;
+extern int dpaa_logtype_eventdev;
#define DPAA_BUS_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, dpaa_logtype_bus, "%s(): " fmt "\n", \
@@ -100,6 +75,21 @@ extern int dpaa_logtype_pmd;
#define DPAA_PMD_WARN(fmt, args...) \
DPAA_PMD_LOG(WARNING, fmt, ## args)
+#define DPAA_EVENTDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_eventdev, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define EVENTDEV_INIT_FUNC_TRACE() DPAA_EVENTDEV_LOG(DEBUG, " >>")
+
+#define DPAA_EVENTDEV_DEBUG(fmt, args...) \
+ DPAA_EVENTDEV_LOG(DEBUG, fmt, ## args)
+#define DPAA_EVENTDEV_ERR(fmt, args...) \
+ DPAA_EVENTDEV_LOG(ERR, fmt, ## args)
+#define DPAA_EVENTDEV_INFO(fmt, args...) \
+ DPAA_EVENTDEV_LOG(INFO, fmt, ## args)
+#define DPAA_EVENTDEV_WARN(fmt, args...) \
+ DPAA_EVENTDEV_LOG(WARNING, fmt, ## args)
+
/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA_DP_LOG(level, fmt, args...) \
RTE_LOG_DP(level, PMD, fmt, ## args)
diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile
index c08b2af9..de237f0f 100644
--- a/drivers/bus/fslmc/Makefile
+++ b/drivers/bus/fslmc/Makefile
@@ -1,32 +1,6 @@
-# BSD LICENSE
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
#
-# Copyright 2016 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of NXP nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -39,6 +13,7 @@ ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
CONFIG_RTE_LIBRTE_FSLMC_BUS = $(CONFIG_RTE_LIBRTE_DPAA2_PMD)
endif
+CFLAGS += -DALLOW_EXPERIMENTAL_API
ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
CFLAGS += -O0 -g
CFLAGS += "-Wno-error"
@@ -62,7 +37,8 @@ EXPORT_MAP := rte_bus_fslmc_version.map
LIBABIVER := 1
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
- qbman/qbman_portal.c
+ qbman/qbman_portal.c \
+ qbman/qbman_debug.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
mc/dpmng.c \
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index 480857e5..5ee0beb8 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
@@ -40,7 +14,7 @@
#include <rte_malloc.h>
#include <rte_devargs.h>
#include <rte_memcpy.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
@@ -51,6 +25,17 @@
#define VFIO_IOMMU_GROUP_PATH "/sys/kernel/iommu_groups"
struct rte_fslmc_bus rte_fslmc_bus;
+uint8_t dpaa2_virt_mode;
+
+uint32_t
+rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type)
+{
+ if (device_type > DPAA2_DEVTYPE_MAX)
+ return 0;
+ return rte_fslmc_bus.device_count[device_type];
+}
+
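
The new per-type counters let a PMD check resource availability before probing. A short hedged sketch, using the DPAA2_IO type scanned below:

```c
/* Hypothetical probe-time check: require at least one DPIO object to
 * have been scanned on the fslmc bus. */
static int check_dpio_available(void)
{
	if (rte_fslmc_get_device_count(DPAA2_IO) == 0)
		return -1; /* no software portals available */
	return 0;
}
```
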
+RTE_DEFINE_PER_LCORE(struct dpaa2_portal_dqrr, dpaa2_held_bufs);
static void
cleanup_fslmc_device_list(void)
@@ -156,13 +141,16 @@ scan_one_fslmc_device(char *dev_name)
dev->dev_type = DPAA2_BPOOL;
else if (!strncmp("dpio", t_ptr, 4))
dev->dev_type = DPAA2_IO;
- else if (!strncmp("dpci", t_ptr, 5))
+ else if (!strncmp("dpci", t_ptr, 4))
dev->dev_type = DPAA2_CI;
else if (!strncmp("dpmcp", t_ptr, 5))
dev->dev_type = DPAA2_MPORTAL;
else
dev->dev_type = DPAA2_UNKNOWN;
+ /* Update the device found into the device_count table */
+ rte_fslmc_bus.device_count[dev->dev_type]++;
+
t_ptr = strtok(NULL, ".");
if (!t_ptr) {
FSLMC_BUS_LOG(ERR, "Incorrect device string observed (%s).",
@@ -300,6 +288,9 @@ rte_fslmc_probe(void)
}
}
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ dpaa2_virt_mode = 1;
+
return 0;
}
@@ -347,11 +338,51 @@ rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
}
/*
+ * Check whether all devices on the bus support IOVA as VA.
+ */
+static inline int
+fslmc_all_device_support_iova(void)
+{
+ int ret = 0;
+ struct rte_dpaa2_device *dev;
+ struct rte_dpaa2_driver *drv;
+
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
+ ret = rte_fslmc_match(drv, dev);
+ if (ret)
+ continue;
+			/* the driver does not support IOVA as VA */
+ if (!(drv->drv_flags & RTE_DPAA2_DRV_IOVA_AS_VA))
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
 * Get the IOMMU class of DPAA2 devices on the bus.
*/
static enum rte_iova_mode
rte_dpaa2_get_iommu_class(void)
{
+ bool is_vfio_noiommu_enabled = 1;
+ bool has_iova_va;
+
+ if (TAILQ_EMPTY(&rte_fslmc_bus.device_list))
+ return RTE_IOVA_DC;
+
+ /* check if all devices on the bus support Virtual addressing or not */
+ has_iova_va = fslmc_all_device_support_iova();
+
+#ifdef VFIO_PRESENT
+	is_vfio_noiommu_enabled = (rte_vfio_noiommu_is_enabled() == true);
+#endif
+
+ if (has_iova_va && !is_vfio_noiommu_enabled)
+ return RTE_IOVA_VA;
+
return RTE_IOVA_PA;
}
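
In effect the bus reports VA only when every matched driver sets RTE_DPAA2_DRV_IOVA_AS_VA and VFIO is not running in no-IOMMU mode. A condensed restatement of that decision (not the actual driver code):

```c
#include <stdbool.h>

/* DC when the bus is empty, VA when all drivers handle virtual
 * addressing and a real IOMMU is present, PA otherwise. */
static enum rte_iova_mode
iova_decision(bool bus_empty, bool all_iova_va, bool vfio_noiommu)
{
	if (bus_empty)
		return RTE_IOVA_DC;
	if (all_iova_va && !vfio_noiommu)
		return RTE_IOVA_VA;
	return RTE_IOVA_PA;
}
```
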
@@ -364,6 +395,7 @@ struct rte_fslmc_bus rte_fslmc_bus = {
},
.device_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.device_list),
.driver_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.driver_list),
+ .device_count = {0},
};
RTE_REGISTER_BUS(fslmc, rte_fslmc_bus.bus);
diff --git a/drivers/bus/fslmc/fslmc_logs.h b/drivers/bus/fslmc/fslmc_logs.h
index 1f7c24b3..d87b6386 100644
--- a/drivers/bus/fslmc/fslmc_logs.h
+++ b/drivers/bus/fslmc/fslmc_logs.h
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSLMC_LOGS_H_
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 7831201a..1241295b 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
@@ -48,7 +22,7 @@
#include <eal_filesystem.h>
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -249,7 +223,10 @@ int rte_fslmc_vfio_dmamap(void)
dma_map.size = memseg[i].len;
dma_map.vaddr = memseg[i].addr_64;
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- dma_map.iova = memseg[i].phys_addr;
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ dma_map.iova = dma_map.vaddr;
+ else
+ dma_map.iova = memseg[i].iova;
#else
dma_map.iova = dma_map.vaddr;
#endif
@@ -660,12 +637,14 @@ fslmc_vfio_setup_group(void)
if (ret) {
FSLMC_VFIO_LOG(ERR, "VFIO error getting group status");
close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
return ret;
}
if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
FSLMC_VFIO_LOG(ERR, "VFIO group not viable");
close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
return -EPERM;
}
/* Since Group is VIABLE, Store the groupid */
@@ -680,6 +659,7 @@ fslmc_vfio_setup_group(void)
"Error connecting container with groupid %d",
groupid);
close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
return ret;
}
}
@@ -690,6 +670,7 @@ fslmc_vfio_setup_group(void)
FSLMC_VFIO_LOG(ERR, "Error getting device %s fd from group %d",
g_container, vfio_group.groupid);
close(vfio_group.fd);
+ rte_vfio_clear_group(vfio_group.fd);
return ret;
}
container_device_fd = ret;
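
Every early-exit path in fslmc_vfio_setup_group() now pairs close() with rte_vfio_clear_group(), so the EAL's VFIO bookkeeping cannot retain a stale group fd. A hedged sketch of the same cleanup centralized behind one label — illustrative names, not the full function, and it assumes rte_vfio_clear_group() is the EAL helper exported in this release:

#include <unistd.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>
#include <rte_vfio.h>   /* rte_vfio_clear_group() */

static int
check_group_viable(int group_fd, struct vfio_group_status *status)
{
        int ret = ioctl(group_fd, VFIO_GROUP_GET_STATUS, status);

        if (ret)
                goto err;               /* ioctl itself failed */
        if (!(status->flags & VFIO_GROUP_FLAGS_VIABLE)) {
                ret = -EPERM;           /* group exists but is not usable */
                goto err;
        }
        return 0;

err:
        close(group_fd);
        rte_vfio_clear_group(group_fd); /* drop EAL's record of this fd */
        return ret;
}
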
diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h
index b442dc08..e8fb3445 100644
--- a/drivers/bus/fslmc/fslmc_vfio.h
+++ b/drivers/bus/fslmc/fslmc_vfio.h
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSLMC_VFIO_H_
diff --git a/drivers/bus/fslmc/mc/dpbp.c b/drivers/bus/fslmc/mc/dpbp.c
index a846245a..0215d22d 100644
--- a/drivers/bus/fslmc/mc/dpbp.c
+++ b/drivers/bus/fslmc/mc/dpbp.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
diff --git a/drivers/bus/fslmc/mc/dpci.c b/drivers/bus/fslmc/mc/dpci.c
index 5471024c..ff366bfa 100644
--- a/drivers/bus/fslmc/mc/dpci.c
+++ b/drivers/bus/fslmc/mc/dpci.c
@@ -1,40 +1,7 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
diff --git a/drivers/bus/fslmc/mc/dpcon.c b/drivers/bus/fslmc/mc/dpcon.c
index 477ee46e..3f6e04b9 100644
--- a/drivers/bus/fslmc/mc/dpcon.c
+++ b/drivers/bus/fslmc/mc/dpcon.c
@@ -1,33 +1,7 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
*
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
diff --git a/drivers/bus/fslmc/mc/dpio.c b/drivers/bus/fslmc/mc/dpio.c
index 76c5d7b7..966277cc 100644
--- a/drivers/bus/fslmc/mc/dpio.c
+++ b/drivers/bus/fslmc/mc/dpio.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
diff --git a/drivers/bus/fslmc/mc/dpmng.c b/drivers/bus/fslmc/mc/dpmng.c
index f9946e8c..27708087 100644
--- a/drivers/bus/fslmc/mc/dpmng.c
+++ b/drivers/bus/fslmc/mc/dpmng.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp.h b/drivers/bus/fslmc/mc/fsl_dpbp.h
index 77ea6f27..11183626 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPBP_H
#define __FSL_DPBP_H
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
index ce38c79b..18402ced 100644
--- a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_DPBP_CMD_H
#define _FSL_DPBP_CMD_H
diff --git a/drivers/bus/fslmc/mc/fsl_dpci.h b/drivers/bus/fslmc/mc/fsl_dpci.h
index f4aa6e57..f69ed3f3 100644
--- a/drivers/bus/fslmc/mc/fsl_dpci.h
+++ b/drivers/bus/fslmc/mc/fsl_dpci.h
@@ -1,40 +1,7 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPCI_H
#define __FSL_DPCI_H
diff --git a/drivers/bus/fslmc/mc/fsl_dpci_cmd.h b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
index 73a551a5..634248ac 100644
--- a/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpci_cmd.h
@@ -1,40 +1,7 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_DPCI_CMD_H
#define _FSL_DPCI_CMD_H
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon.h b/drivers/bus/fslmc/mc/fsl_dpcon.h
index 1da807f0..36dd5f3c 100644
--- a/drivers/bus/fslmc/mc/fsl_dpcon.h
+++ b/drivers/bus/fslmc/mc/fsl_dpcon.h
@@ -1,40 +1,7 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPCON_H
#define __FSL_DPCON_H
diff --git a/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h b/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
index 4d0522c2..1641e320 100644
--- a/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpcon_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_DPCON_CMD_H
#define _FSL_DPCON_CMD_H
diff --git a/drivers/bus/fslmc/mc/fsl_dpio.h b/drivers/bus/fslmc/mc/fsl_dpio.h
index 3d96adfc..3158f531 100644
--- a/drivers/bus/fslmc/mc/fsl_dpio.h
+++ b/drivers/bus/fslmc/mc/fsl_dpio.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPIO_H
#define __FSL_DPIO_H
diff --git a/drivers/bus/fslmc/mc/fsl_dpio_cmd.h b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
index 3e9e1f68..16a9bc41 100644
--- a/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_DPIO_CMD_H
#define _FSL_DPIO_CMD_H
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng.h b/drivers/bus/fslmc/mc/fsl_dpmng.h
index 97030e4c..afaf9b71 100644
--- a/drivers/bus/fslmc/mc/fsl_dpmng.h
+++ b/drivers/bus/fslmc/mc/fsl_dpmng.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPMNG_H
#define __FSL_DPMNG_H
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h b/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
index 4c0a6297..ac380be1 100644
--- a/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_dpmng_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPMNG_CMD_H
diff --git a/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
index 2cec29ea..a3c3e798 100644
--- a/drivers/bus/fslmc/mc/fsl_mc_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_MC_CMD_H
#define __FSL_MC_CMD_H
diff --git a/drivers/bus/fslmc/mc/fsl_mc_sys.h b/drivers/bus/fslmc/mc/fsl_mc_sys.h
index d803205e..d0c7b39f 100644
--- a/drivers/bus/fslmc/mc/fsl_mc_sys.h
+++ b/drivers/bus/fslmc/mc/fsl_mc_sys.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_MC_SYS_H
#define _FSL_MC_SYS_H
@@ -60,7 +27,6 @@ struct fsl_mc_io {
#else /* __linux_driver__ */
#include <stdio.h>
-#include <libio.h>
#include <stdint.h>
#include <errno.h>
#include <sys/uio.h>
diff --git a/drivers/bus/fslmc/mc/mc_sys.c b/drivers/bus/fslmc/mc/mc_sys.c
index f0f9a268..efafdc31 100644
--- a/drivers/bus/fslmc/mc/mc_sys.c
+++ b/drivers/bus/fslmc/mc/mc_sys.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
index 334e1f5a..f1f14e29 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
@@ -45,7 +19,8 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_mbuf_pool_ops.h>
#include <fslmc_logs.h>
#include <rte_fslmc.h>
@@ -64,6 +39,7 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
{
struct dpaa2_dpbp_dev *dpbp_node;
int ret;
+ static int register_once;
/* Allocate DPAA2 dpbp handle */
dpbp_node = rte_malloc(NULL, sizeof(struct dpaa2_dpbp_dev), 0);
@@ -100,6 +76,11 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpbp.%d]\n", dpbp_id);
+ if (!register_once) {
+ rte_mbuf_set_platform_mempool_ops(DPAA2_MEMPOOL_OPS_NAME);
+ register_once = 1;
+ }
+
return 0;
}
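
The new register_once flag means the first successful DPBP probe nominates "dpaa2" as the platform's preferred mbuf mempool ops; later probes skip the call. A minimal sketch of that idiom, assuming the single-threaded probe context of the EAL bus scan (a plain flag, no atomics):

#include <rte_mbuf_pool_ops.h>

#define DPAA2_MEMPOOL_OPS_NAME "dpaa2"  /* from dpaa2_hw_pvt.h in this patch */

static void
register_platform_pool_ops(void)
{
        static int done;

        if (!done) {
                /* Tell the mbuf layer which mempool ops this platform
                 * prefers; applications can still override the choice. */
                rte_mbuf_set_platform_mempool_ops(DPAA2_MEMPOOL_OPS_NAME);
                done = 1;
        }
}
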
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
index ae189c72..fb28e497 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
@@ -44,7 +18,7 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <fslmc_logs.h>
#include <rte_fslmc.h>
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index f00070f3..eefde155 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <stdio.h>
@@ -50,7 +24,7 @@
#include <sys/eventfd.h>

#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -76,6 +50,9 @@ static struct dpio_dev_list dpio_dev_list
= TAILQ_HEAD_INITIALIZER(dpio_dev_list); /*!< DPIO device list */
static uint32_t io_space_count;
+/* Variable to store DPAA2 platform type */
+uint32_t dpaa2_svr_family;
+
/*Stashing Macros default for LS208x*/
static int dpaa2_core_cluster_base = 0x04;
static int dpaa2_cluster_sz = 2;
@@ -109,6 +86,7 @@ dpaa2_core_cluster_sdest(int cpu_id)
return dpaa2_core_cluster_base + x;
}
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
{
#define STRING_LEN 28
@@ -197,6 +175,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
return 0;
}
+#endif
static int
configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
@@ -265,26 +244,6 @@ static int
dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
{
int sdest, ret;
- static int first_time;
-
- /* find the SoC type for the first time */
- if (!first_time) {
- struct mc_soc_version mc_plat_info = {0};
-
- if (mc_get_soc_version(dpio_dev->dpio,
- CMD_PRI_LOW, &mc_plat_info)) {
- PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");
- } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) {
- dpaa2_core_cluster_base = 0x02;
- dpaa2_cluster_sz = 4;
- PMD_INIT_LOG(DEBUG, "\tLS108x (A53) Platform Detected");
- } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) {
- dpaa2_core_cluster_base = 0x00;
- dpaa2_cluster_sz = 2;
- PMD_INIT_LOG(DEBUG, "\tLX2160 Platform Detected");
- }
- first_time = 1;
- }
/* Set the Stashing Destination */
if (cpu_id < 0) {
@@ -309,10 +268,12 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
return -1;
}
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
if (dpaa2_dpio_intr_init(dpio_dev)) {
PMD_DRV_LOG(ERR, "Interrupt registration failed for dpio\n");
return -1;
}
+#endif
return 0;
}
@@ -499,6 +460,25 @@ dpaa2_create_dpio_device(int vdev_fd,
rte_free(dpio_dev);
}
+ /* find the SoC type for the first time */
+ if (!dpaa2_svr_family) {
+ struct mc_soc_version mc_plat_info = {0};
+
+ if (mc_get_soc_version(dpio_dev->dpio,
+ CMD_PRI_LOW, &mc_plat_info)) {
+ PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");
+ } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) {
+ dpaa2_core_cluster_base = 0x02;
+ dpaa2_cluster_sz = 4;
+ PMD_INIT_LOG(DEBUG, "\tLS108x (A53) Platform Detected");
+ } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) {
+ dpaa2_core_cluster_base = 0x00;
+ dpaa2_cluster_sz = 2;
+ PMD_INIT_LOG(DEBUG, "\tLX2160 Platform Detected");
+ }
+ dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000);
+ }
+
TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpio.%d]\n", object_id);
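
The SoC probe moved from dpaa2_configure_stashing() into device creation, and the masked SVR is cached once in dpaa2_svr_family for later per-platform decisions. A sketch of the decode as the hunk performs it; SVR_LX2160A's value is visible later in this diff, while SVR_LS1080A's is an assumed stand-in for the dpaa2_hw_pvt.h definition:

#include <stdint.h>

#define SVR_LS1080A 0x87030000  /* assumed; see dpaa2_hw_pvt.h */
#define SVR_LX2160A 0x87360000  /* as defined later in this patch */

struct cluster_layout {
        int base;       /* first stashing destination (SDEST) index */
        int size;       /* cores per cluster */
};

static struct cluster_layout
layout_for_svr(uint32_t svr)
{
        /* LS208x defaults, matching the static initializers above */
        struct cluster_layout l = { .base = 0x04, .size = 2 };

        switch (svr & 0xffff0000) {     /* keep family bits, drop revision */
        case SVR_LS1080A:
                l.base = 0x02;
                l.size = 4;
                break;
        case SVR_LX2160A:
                l.base = 0x00;
                l.size = 2;
                break;
        }
        return l;
}
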
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index e845340c..c0bd8782 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DPAA2_HW_DPIO_H_
@@ -54,6 +28,9 @@ RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
#define DPAA2_PER_LCORE_SEC_DPIO RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
#define DPAA2_PER_LCORE_SEC_PORTAL DPAA2_PER_LCORE_SEC_DPIO->sw_portal
+/* Variable to store DPAA2 platform type */
+extern uint32_t dpaa2_svr_family;
+
extern struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id);
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index c1b842f3..d421dbf0 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DPAA2_HW_PVT_H_
@@ -53,8 +27,8 @@
#define SVR_LS2088A 0x87090000
#define SVR_LX2160A 0x87360000
-#ifndef ETH_VLAN_HLEN
-#define ETH_VLAN_HLEN 4 /** < Vlan Header Length */
+#ifndef VLAN_TAG_SIZE
+#define VLAN_TAG_SIZE 4 /**< VLAN tag size in bytes */
#endif
#define MAX_TX_RING_SLOTS 8
@@ -70,6 +44,8 @@
/* Maximum release/acquire from QBMAN */
#define DPAA2_MBUF_MAX_ACQ_REL 7
+#define DPAA2_MEMPOOL_OPS_NAME "dpaa2"
+
#define MAX_BPID 256
#define DPAA2_MBUF_HW_ANNOTATION 64
#define DPAA2_FD_PTA_SIZE 0
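[Editor's note: as a hedged aside, the newly exported ops name is what an application would pass when binding a pool to the DPAA2 hardware mempool handler; mp is an assumed, already-created rte_mempool.]

	/* Sketch: select the DPAA2 hardware mempool handler by name. */
	if (rte_mempool_set_ops_byname(mp, DPAA2_MEMPOOL_OPS_NAME, NULL) != 0)
		rte_panic("dpaa2 mempool ops not registered\n");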
@@ -105,8 +81,6 @@ struct dpaa2_dpio_dev {
struct rte_intr_handle intr_handle; /* Interrupt related info */
int32_t epoll_fd; /**< File descriptor created for interrupt polling */
int32_t hw_id; /**< A unique ID of this DPIO device instance */
- uint64_t dqrr_held;
- uint8_t dqrr_size;
};
struct dpaa2_dpbp_dev {
@@ -121,8 +95,9 @@ struct dpaa2_dpbp_dev {
struct queue_storage_info_t {
struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
struct qbman_result *active_dqs;
- int active_dpio_id;
- int toggle;
+ uint8_t active_dpio_id;
+ uint8_t toggle;
+ uint8_t last_num_pkts;
};
struct dpaa2_queue;
@@ -199,56 +174,61 @@ enum qbman_fd_format {
};
/*Macros to define operations on FD*/
#define DPAA2_SET_FD_ADDR(fd, addr) do { \
- fd->simple.addr_lo = lower_32_bits((uint64_t)(addr)); \
- fd->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \
+ (fd)->simple.addr_lo = lower_32_bits((uint64_t)(addr)); \
+ (fd)->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
-#define DPAA2_SET_FD_LEN(fd, length) (fd)->simple.len = length
+#define DPAA2_SET_FD_LEN(fd, length) ((fd)->simple.len = length)
#define DPAA2_SET_FD_BPID(fd, bpid) ((fd)->simple.bpid_offset |= bpid)
-#define DPAA2_SET_FD_IVP(fd) ((fd->simple.bpid_offset |= 0x00004000))
+#define DPAA2_SET_ONLY_FD_BPID(fd, bpid) \
+ ((fd)->simple.bpid_offset = bpid)
+#define DPAA2_SET_FD_IVP(fd) (((fd)->simple.bpid_offset |= 0x00004000))
#define DPAA2_SET_FD_OFFSET(fd, offset) \
- ((fd->simple.bpid_offset |= (uint32_t)(offset) << 16))
-#define DPAA2_SET_FD_INTERNAL_JD(fd, len) fd->simple.frc = (0x80000000 | (len))
-#define DPAA2_SET_FD_FRC(fd, frc) fd->simple.frc = frc
-#define DPAA2_RESET_FD_CTRL(fd) (fd)->simple.ctrl = 0
+ (((fd)->simple.bpid_offset |= (uint32_t)(offset) << 16))
+#define DPAA2_SET_FD_INTERNAL_JD(fd, len) \
+ ((fd)->simple.frc = (0x80000000 | (len)))
+#define DPAA2_GET_FD_FRC_PARSE_SUM(fd) \
+ ((uint16_t)(((fd)->simple.frc & 0xffff0000) >> 16))
+#define DPAA2_SET_FD_FRC(fd, frc) ((fd)->simple.frc = frc)
+#define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0)
#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16))
#define DPAA2_SET_FD_FLC(fd, addr) do { \
- fd->simple.flc_lo = lower_32_bits((uint64_t)(addr)); \
- fd->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
+ (fd)->simple.flc_lo = lower_32_bits((uint64_t)(addr)); \
+ (fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
-#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) (fle->frc = (0x80000000 | (len)))
+#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) ((fle)->frc = (0x80000000 | (len)))
#define DPAA2_GET_FLE_ADDR(fle) \
- (uint64_t)((((uint64_t)(fle->addr_hi)) << 32) + fle->addr_lo)
+ (uint64_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
#define DPAA2_SET_FLE_ADDR(fle, addr) do { \
- fle->addr_lo = lower_32_bits((uint64_t)addr); \
- fle->addr_hi = upper_32_bits((uint64_t)addr); \
+ (fle)->addr_lo = lower_32_bits((uint64_t)addr); \
+ (fle)->addr_hi = upper_32_bits((uint64_t)addr); \
} while (0)
#define DPAA2_GET_FLE_CTXT(fle) \
(uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \
(fle)->reserved[0])
#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
- fle->reserved[0] = lower_32_bits((uint64_t)addr); \
- fle->reserved[1] = upper_32_bits((uint64_t)addr); \
+ (fle)->reserved[0] = lower_32_bits((uint64_t)addr); \
+ (fle)->reserved[1] = upper_32_bits((uint64_t)addr); \
} while (0)
#define DPAA2_SET_FLE_OFFSET(fle, offset) \
((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid)
#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
-#define DPAA2_SET_FLE_FIN(fle) (fle->fin_bpid_offset |= (uint64_t)1 << 31)
+#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 31)
#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
- (fd->simple.bpid_offset |= (uint32_t)1 << 28)
+ ((fd)->simple.bpid_offset |= (uint32_t)1 << 28)
#define DPAA2_GET_FD_ADDR(fd) \
((uint64_t)((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
#define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len)
#define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF))
-#define DPAA2_GET_FD_IVP(fd) ((fd->simple.bpid_offset & 0x00004000) >> 14)
+#define DPAA2_GET_FD_IVP(fd) (((fd)->simple.bpid_offset & 0x00004000) >> 14)
#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
-#define DPAA2_SET_FLE_SG_EXT(fle) (fle->fin_bpid_offset |= (uint64_t)1 << 29)
+#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29)
#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
- ((fle->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
+ (((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
#define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
((struct rte_mbuf *)((uint64_t)(buf) - (meta_data_size)))
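[Editor's note: the parenthesization added throughout these macros is not cosmetic; the comment below sketches the failure mode it prevents. The pointer-arithmetic argument is a hypothetical caller.]

	/* With the old expansion fd->simple.bpid_offset |= ..., a call like
	 *     DPAA2_SET_FD_IVP(fd + i);
	 * expanded to fd + i->simple.bpid_offset |= ..., which misparses;
	 * with (fd)->simple... it expands to (fd + i)->simple... as intended.
	 */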
@@ -276,18 +256,24 @@ enum qbman_fd_format {
#define DPAA2_EQ_RESP_ALWAYS 1
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+extern uint8_t dpaa2_virt_mode;
static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
/* todo - this is costly, need to write a fast conversion routine */
static void *dpaa2_mem_ptov(phys_addr_t paddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ const struct rte_memseg *memseg;
int i;
+ if (dpaa2_virt_mode)
+ return (void *)paddr;
+
+ memseg = rte_eal_get_physmem_layout();
+
for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (paddr >= memseg[i].phys_addr &&
- (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
+ if (paddr >= memseg[i].iova &&
+ (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
return (void *)(memseg[i].addr_64
- + (paddr - memseg[i].phys_addr));
+ + (paddr - memseg[i].iova));
}
return NULL;
}
@@ -295,13 +281,18 @@ static void *dpaa2_mem_ptov(phys_addr_t paddr)
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ const struct rte_memseg *memseg;
int i;
+ if (dpaa2_virt_mode)
+ return vaddr;
+
+ memseg = rte_eal_get_physmem_layout();
+
for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
if (vaddr >= memseg[i].addr_64 &&
vaddr < memseg[i].addr_64 + memseg[i].len)
- return memseg[i].phys_addr
+ return memseg[i].iova
+ (vaddr - memseg[i].addr_64);
}
return (phys_addr_t)(NULL);
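[Editor's note: a minimal sketch of the identity behaviour the new dpaa2_virt_mode short-circuit gives these helpers; the allocation and the assertion macro are assumptions on the caller's side.]

	/* When dpaa2_virt_mode is set, both translations collapse to
	 * identity, so an address round-trips unchanged.
	 */
	void *va = rte_malloc(NULL, 64, 0);            /* assumed allocation */
	phys_addr_t pa = dpaa2_mem_vtop((uint64_t)va);
	RTE_ASSERT(!dpaa2_virt_mode || dpaa2_mem_ptov(pa) == va);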
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
index 423087cb..7be8f54c 100644
--- a/drivers/bus/fslmc/qbman/include/compat.h
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -1,30 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2008-2016 Freescale Semiconductor, Inc.
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef HEADER_COMPAT_H
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index 14159609..96269ed4 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -1,29 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_QBMAN_BASE_H
#define _FSL_QBMAN_BASE_H
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
new file mode 100644
index 00000000..072ad551
--- /dev/null
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_debug.h
@@ -0,0 +1,30 @@
+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+struct qbman_swp;
+
+struct qbman_fq_query_np_rslt {
+ uint8_t verb;
+ uint8_t rslt;
+ uint8_t st1;
+ uint8_t st2;
+ uint8_t reserved[2];
+ uint16_t od1_sfdr;
+ uint16_t od2_sfdr;
+ uint16_t od3_sfdr;
+ uint16_t ra1_sfdr;
+ uint16_t ra2_sfdr;
+ uint32_t pfdr_hptr;
+ uint32_t pfdr_tptr;
+ uint32_t frm_cnt;
+ uint32_t byte_cnt;
+ uint16_t ics_surp;
+ uint8_t is;
+ uint8_t reserved2[29];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
+ struct qbman_fq_query_np_rslt *r);
+uint32_t qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
+uint32_t qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
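[Editor's note: a hedged usage sketch for the query API declared above; swp and fqid are assumed to come from the caller's portal setup.]

	struct qbman_fq_query_np_rslt rslt;

	if (qbman_fq_query_state(swp, fqid, &rslt) == 0)
		printf("fq %u: %u frames, %u bytes\n", fqid,
		       qbman_fq_state_frame_count(&rslt),
		       qbman_fq_state_byte_count(&rslt));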
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index 1e656602..3e63db3a 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -1,29 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_QBMAN_PORTAL_H
#define _FSL_QBMAN_PORTAL_H
@@ -403,6 +381,12 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *p);
/**
+ * qbman_swp_prefetch_dqrr_next() - prefetch the next DQRR entry.
+ * @s: the software portal object.
+ */
+void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s);
+
+/**
* qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
* qbman_swp_dqrr_next().
* @s: the software portal object.
@@ -411,6 +395,13 @@ const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *p);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
/**
+ * qbman_swp_dqrr_idx_consume() - Consume the DQRR entry at the given index.
+ * @s: the software portal object.
+ * @dqrr_index: the DQRR index entry to be consumed.
+ */
+void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);
+
+/**
* qbman_get_dqrr_idx() - Get dqrr index from the given dqrr
* @dqrr: the given dqrr object.
*
@@ -581,6 +572,9 @@ int qbman_result_is_FQPN(const struct qbman_result *dq);
/* volatile dequeue command is expired */
#define QBMAN_DQ_STAT_EXPIRED 0x01
+#define QBMAN_EQCR_DCA_IDXMASK 0x0f
+#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)
+
/**
* qbman_result_DQ_flags() - Get the STAT field of dequeue response
* @dq: the dequeue result.
@@ -971,6 +965,7 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
int qbman_swp_enqueue_multiple(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
+ uint32_t *flags,
int num_frames);
/**
* qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with
@@ -978,6 +973,7 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
* @s: the software portal used for enqueue.
* @d: the enqueue descriptor.
* @fd: the frame descriptor to be enqueued.
+ * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
* @num_frames: the number of the frames to be enqueued.
*
* Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
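[Editor's note: a sketch of per-frame DCA tagging with the new flags parameter; portal s, descriptor d, the fds array, dqrr_idx[] and the count N are assumed from the caller.]

	uint32_t flags[N];
	int i, sent;

	for (i = 0; i < N; i++)
		flags[i] = QBMAN_ENQUEUE_FLAG_DCA |
			   (dqrr_idx[i] & QBMAN_EQCR_DCA_IDXMASK);
	sent = qbman_swp_enqueue_multiple(s, d, fds, flags, N);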
diff --git a/drivers/bus/fslmc/qbman/qbman_debug.c b/drivers/bus/fslmc/qbman/qbman_debug.c
new file mode 100644
index 00000000..591673ab
--- /dev/null
+++ b/drivers/bus/fslmc/qbman/qbman_debug.c
@@ -0,0 +1,66 @@
+/* Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "compat.h"
+#include <fsl_qbman_debug.h>
+#include "qbman_portal.h"
+
+/* QBMan portal management command code */
+#define QBMAN_BP_QUERY 0x32
+#define QBMAN_FQ_QUERY 0x44
+#define QBMAN_FQ_QUERY_NP 0x45
+#define QBMAN_WQ_QUERY 0x47
+#define QBMAN_CGR_QUERY 0x51
+#define QBMAN_WRED_QUERY 0x54
+#define QBMAN_CGR_STAT_QUERY 0x55
+#define QBMAN_CGR_STAT_QUERY_CLR 0x56
+
+struct qbman_fq_query_desc {
+ uint8_t verb;
+ uint8_t reserved[3];
+ uint32_t fqid;
+ uint8_t reserved2[57];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid,
+ struct qbman_fq_query_np_rslt *r)
+{
+ struct qbman_fq_query_desc *p;
+ struct qbman_fq_query_np_rslt *rslt;
+
+ p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->fqid = fqid;
+ rslt = (struct qbman_fq_query_np_rslt *)qbman_swp_mc_complete(s, p,
+ QBMAN_FQ_QUERY_NP);
+ if (!rslt) {
+ pr_err("qbman: Query FQID %d NP fields failed, no response\n",
+ fqid);
+ return -EIO;
+ }
+ *r = *rslt;
+
+ /* Decode the outcome */
+ QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
+
+ /* Determine success or failure */
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
+ fqid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+uint32_t qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return (r->frm_cnt & 0x00FFFFFF);
+}
+
+uint32_t qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return r->byte_cnt;
+}
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index 809770c7..e2217335 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -1,29 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "qbman_portal.h"
@@ -518,6 +496,7 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
int qbman_swp_enqueue_multiple(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
+ uint32_t *flags,
int num_frames)
{
uint32_t *p;
@@ -560,6 +539,12 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
p = qbman_cena_write_start_wo_shadow(&s->sys,
QBMAN_CENA_SWP_EQCR(eqcr_pi & 7));
p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ /* local renamed to avoid shadowing the descriptor parameter 'd' */
+ struct qbman_eq_desc *eq_d = (struct qbman_eq_desc *)p;
+
+ eq_d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
eqcr_pi++;
eqcr_pi &= 0xF;
if (!(eqcr_pi & 7))
@@ -794,6 +779,17 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
#define QBMAN_RESULT_BPSCN 0x29
#define QBMAN_RESULT_CSCN_WQ 0x2a
+#include <rte_prefetch.h>
+
+void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s)
+{
+ const struct qbman_result *p;
+
+ p = qbman_cena_read_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ rte_prefetch0(p);
+}
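[Editor's note: typical use of the new prefetch helper in a poll loop — a sketch only; swp is an assumed portal and process() a hypothetical handler.]

	const struct qbman_result *dq;

	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
		qbman_swp_prefetch_dqrr_next(swp);  /* warm the next entry */
		process(dq);                        /* hypothetical handler */
		qbman_swp_dqrr_consume(swp, dq);
	}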
+
/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
* only once, so repeated calls can return a sequence of DQRR entries, without
* requiring they be consumed immediately or in any particular order.
@@ -881,6 +877,13 @@ void qbman_swp_dqrr_consume(struct qbman_swp *s,
qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
+/* Consume a DQRR entry, identified by its index, that was previously
+ * returned from qbman_swp_dqrr_next().
+ */
+void qbman_swp_dqrr_idx_consume(struct qbman_swp *s,
+ uint8_t dqrr_index)
+{
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, dqrr_index);
+}
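[Editor's note: a sketch of index-based consumption, which pairs with the per-lcore held-buffer tracking this patch adds in rte_fslmc.h; idx is assumed to be a previously recorded DQRR index.]

	if (DPAA2_PER_LCORE_DQRR_HELD & (1ULL << idx)) {
		qbman_swp_dqrr_idx_consume(swp, idx);
		DPAA2_PER_LCORE_DQRR_HELD &= ~(1ULL << idx);
		DPAA2_PER_LCORE_DQRR_SIZE--;
	}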
+
/*********************************/
/* Polling user-provided storage */
/*********************************/
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.h b/drivers/bus/fslmc/qbman/qbman_portal.h
index d9f3ed7e..8bff0b4f 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/qbman_portal.h
@@ -1,29 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "qbman_sys.h"
diff --git a/drivers/bus/fslmc/qbman/qbman_sys.h b/drivers/bus/fslmc/qbman/qbman_sys.h
index c216e9cf..846788ea 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys.h
@@ -1,29 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
* driver. They are only included via qbman_private.h, which is itself a
@@ -358,7 +336,7 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 1, 1, 1, 1,
1, 1);
else
- reg = qbman_set_swp_cfg(dqrr_size, 0, 2, 3, 2, 2, 1, 1, 1, 1,
+ reg = qbman_set_swp_cfg(dqrr_size, 0, 1, 3, 2, 2, 1, 1, 1, 1,
1, 1);
qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
diff --git a/drivers/bus/fslmc/qbman/qbman_sys_decl.h b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
index e1125cfe..f82bb18c 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys_decl.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
@@ -1,29 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <compat.h>
#include <fsl_qbman_base.h>
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index 51a2ac69..b7db0741 100644
--- a/drivers/bus/fslmc/rte_bus_fslmc_version.map
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -28,7 +28,6 @@ DPDK_17.05 {
qbman_eq_desc_set_no_orp;
qbman_eq_desc_set_qd;
qbman_eq_desc_set_response;
- qbman_get_version;
qbman_pull_desc_clear;
qbman_pull_desc_set_fq;
qbman_pull_desc_set_numframes;
@@ -41,7 +40,6 @@ DPDK_17.05 {
qbman_swp_acquire;
qbman_swp_pull;
qbman_swp_release;
- qbman_swp_send_multiple;
rte_fslmc_driver_register;
rte_fslmc_driver_unregister;
rte_fslmc_vfio_dmamap;
@@ -89,3 +87,17 @@ DPDK_17.11 {
rte_dpaa2_intr_enable;
} DPDK_17.08;
+
+DPDK_18.02 {
+ global:
+
+ dpaa2_svr_family;
+ dpaa2_virt_mode;
+ per_lcore_dpaa2_held_bufs;
+ qbman_fq_query_state;
+ qbman_fq_state_byte_count;
+ qbman_fq_state_frame_count;
+ qbman_swp_dqrr_idx_consume;
+ qbman_swp_prefetch_dqrr_next;
+ rte_fslmc_get_device_count;
+
+} DPDK_17.11;
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 4c32db62..69d0fec7 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTE_FSLMC_H_
@@ -62,6 +36,13 @@ extern "C" {
#define FSLMC_OBJECT_MAX_LEN 32 /**< Length of each device on bus */
+
+/** Device driver supports link state interrupt */
+#define RTE_DPAA2_DRV_INTR_LSC 0x0008
+
+/** Device driver supports IOVA as VA */
+#define RTE_DPAA2_DRV_IOVA_AS_VA 0x0040
+
struct rte_dpaa2_driver;
/* DPAA2 Device and Driver lists for FSLMC bus */
@@ -81,7 +62,8 @@ enum rte_dpaa2_dev_type {
DPAA2_CI, /**< DPCI type device */
DPAA2_MPORTAL, /**< DPMCP type device */
/* Unknown device placeholder */
- DPAA2_UNKNOWN
+ DPAA2_UNKNOWN,
+ DPAA2_DEVTYPE_MAX,
};
TAILQ_HEAD(rte_dpaa2_object_list, rte_dpaa2_object);
@@ -143,10 +125,28 @@ struct rte_fslmc_bus {
/**< FSLMC DPAA2 Device list */
struct rte_fslmc_driver_list driver_list;
/**< FSLMC DPAA2 Driver list */
- int device_count;
- /**< Optional: Count of devices on bus */
+ int device_count[DPAA2_DEVTYPE_MAX];
+ /**< Count of all devices scanned */
};
+#define DPAA2_PORTAL_DEQUEUE_DEPTH 32
+
+/* Per-lcore storage for mbufs held against outstanding DQRR entries */
+struct dpaa2_portal_dqrr {
+ struct rte_mbuf *mbuf[DPAA2_PORTAL_DEQUEUE_DEPTH];
+ uint64_t dqrr_held;
+ uint8_t dqrr_size;
+};
+
+RTE_DECLARE_PER_LCORE(struct dpaa2_portal_dqrr, dpaa2_held_bufs);
+
+#define DPAA2_PER_LCORE_DQRR_SIZE \
+ RTE_PER_LCORE(dpaa2_held_bufs).dqrr_size
+#define DPAA2_PER_LCORE_DQRR_HELD \
+ RTE_PER_LCORE(dpaa2_held_bufs).dqrr_held
+#define DPAA2_PER_LCORE_DQRR_MBUF(i) \
+ RTE_PER_LCORE(dpaa2_held_bufs).mbuf[i]
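[Editor's note: a sketch of how a dequeue path might record a held entry with these macros; idx and mbuf are assumed from the caller's DQRR processing.]

	/* Mark DQRR entry 'idx' as held by this lcore until consumed. */
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1ULL << idx;
	DPAA2_PER_LCORE_DQRR_MBUF(idx) = mbuf;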
+
/**
* Register a DPAA2 driver.
*
@@ -175,10 +175,6 @@ static void dpaa2initfn_ ##nm(void) \
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
-#ifdef __cplusplus
-}
-#endif
-
/**
* Register a DPAA2 MC Object driver.
*
@@ -188,6 +184,17 @@ RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
*/
void rte_fslmc_object_register(struct rte_dpaa2_object *object);
+/**
+ * Count of a particular type of DPAA2 device scanned on the bus.
+ *
+ * @param device_type
+ * Type of device as rte_dpaa2_dev_type enumerator
+ * @return
+ * >=0 for count; 0 indicates either that no device of the given type was
+ * scanned or that the device type is invalid.
+ */
+uint32_t rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type);
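[Editor's note: usage is a one-liner; for example, an application could size its per-port state from the scan results.]

	uint32_t nb_eth = rte_fslmc_get_device_count(DPAA2_ETH);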
+
/** Helper for DPAA2 object registration */
#define RTE_PMD_REGISTER_DPAA2_OBJECT(nm, dpaa2_obj) \
RTE_INIT(dpaa2objinitfn_ ##nm); \
@@ -198,4 +205,8 @@ static void dpaa2objinitfn_ ##nm(void) \
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+#ifdef __cplusplus
+}
+#endif
+
#endif /* _RTE_FSLMC_H_ */
diff --git a/drivers/bus/meson.build b/drivers/bus/meson.build
new file mode 100644
index 00000000..c6af500e
--- /dev/null
+++ b/drivers/bus/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['pci', 'vdev']
+std_deps = ['eal']
+config_flag_fmt = 'RTE_LIBRTE_@0@_BUS'
+driver_name_fmt = 'rte_bus_@0@'
diff --git a/drivers/bus/pci/bsd/pci.c b/drivers/bus/pci/bsd/pci.c
index b8e21784..655b34b7 100644
--- a/drivers/bus/pci/bsd/pci.c
+++ b/drivers/bus/pci/bsd/pci.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <ctype.h>
diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
index 5da6728f..abde6411 100644
--- a/drivers/bus/pci/linux/pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <string.h>
@@ -576,6 +547,82 @@ pci_one_device_has_iova_va(void)
return 0;
}
+#if defined(RTE_ARCH_X86)
+static bool
+pci_one_device_iommu_support_va(struct rte_pci_device *dev)
+{
+#define VTD_CAP_MGAW_SHIFT 16
+#define VTD_CAP_MGAW_MASK (0x3fULL << VTD_CAP_MGAW_SHIFT)
+#define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */
+ struct rte_pci_addr *addr = &dev->addr;
+ char filename[PATH_MAX];
+ FILE *fp;
+ uint64_t mgaw, vtd_cap_reg = 0;
+
+ snprintf(filename, sizeof(filename),
+ "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap",
+ rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
+ addr->function);
+ if (access(filename, F_OK) == -1) {
+ /* We don't have an Intel IOMMU, assume VA supported */
+ return true;
+ }
+
+ /* We have an Intel IOMMU */
+ fp = fopen(filename, "r");
+ if (fp == NULL) {
+ RTE_LOG(ERR, EAL, "%s(): can't open %s\n", __func__, filename);
+ return false;
+ }
+
+ if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) {
+ RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename);
+ fclose(fp);
+ return false;
+ }
+
+ fclose(fp);
+
+ mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
+ if (mgaw < X86_VA_WIDTH)
+ return false;
+
+ return true;
+}
+#elif defined(RTE_ARCH_PPC_64)
+static bool
+pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
+{
+ return false;
+}
+#else
+static bool
+pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
+{
+ return true;
+}
+#endif
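[Editor's note: a worked example of the MGAW decode above; the register value is illustrative.]

	/* If bits 21:16 of the VT-d capability register hold 0x2e (46), then
	 * mgaw = 46 + 1 = 47 >= X86_VA_WIDTH (47), so IOVA-as-VA is usable;
	 * a smaller field, e.g. 0x26 (38), would force IOVA-as-PA.
	 */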
+
+/*
+ * Check that the IOMMUs of all matched devices support VA as IOVA.
+ */
+static bool
+pci_devices_iommu_support_va(void)
+{
+ struct rte_pci_device *dev = NULL;
+ struct rte_pci_driver *drv = NULL;
+
+ FOREACH_DRIVER_ON_PCIBUS(drv) {
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ if (!rte_pci_match(drv, dev))
+ continue;
+ if (!pci_one_device_iommu_support_va(dev))
+ return false;
+ }
+ }
+ return true;
+}
+
/*
* Get iommu class of PCI devices on the bus.
*/
@@ -586,12 +633,7 @@ rte_pci_get_iommu_class(void)
bool is_vfio_noiommu_enabled = true;
bool has_iova_va;
bool is_bound_uio;
- bool spapr_iommu =
-#if defined(RTE_ARCH_PPC_64)
- true;
-#else
- false;
-#endif
+ bool iommu_no_va;
is_bound = pci_one_device_is_bound();
if (!is_bound)
@@ -599,13 +641,14 @@ rte_pci_get_iommu_class(void)
has_iova_va = pci_one_device_has_iova_va();
is_bound_uio = pci_one_device_bound_uio();
+ iommu_no_va = !pci_devices_iommu_support_va();
#ifdef VFIO_PRESENT
is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ?
true : false;
#endif
if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled &&
- !spapr_iommu)
+ !iommu_no_va)
return RTE_IOVA_VA;
if (has_iova_va) {
@@ -614,8 +657,8 @@ rte_pci_get_iommu_class(void)
RTE_LOG(WARNING, EAL, "vfio-noiommu mode configured\n");
if (is_bound_uio)
RTE_LOG(WARNING, EAL, "few devices bound to UIO\n");
- if (spapr_iommu)
- RTE_LOG(WARNING, EAL, "sPAPR IOMMU does not support IOVA as VA\n");
+ if (iommu_no_va)
+ RTE_LOG(WARNING, EAL, "IOMMU does not support IOVA as VA\n");
}
return RTE_IOVA_PA;
@@ -723,7 +766,6 @@ pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused,
if (!found)
return -1;
- dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
p->base = start;
RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start);
diff --git a/drivers/bus/pci/linux/pci_init.h b/drivers/bus/pci/linux/pci_init.h
index f342c47d..c2e603a3 100644
--- a/drivers/bus/pci/linux/pci_init.h
+++ b/drivers/bus/pci/linux/pci_init.h
@@ -1,41 +1,10 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef EAL_PCI_INIT_H_
#define EAL_PCI_INIT_H_
-#include <linux/version.h>
-
#include <rte_vfio.h>
/** IO resource type: */
@@ -76,14 +45,22 @@ int pci_uio_ioport_unmap(struct rte_pci_ioport *p);
#ifdef VFIO_PRESENT
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
-#define RTE_PCI_MSIX_TABLE_BIR 0x7
-#define RTE_PCI_MSIX_TABLE_OFFSET 0xfffffff8
-#define RTE_PCI_MSIX_FLAGS_QSIZE 0x07ff
-#else
+#ifdef PCI_MSIX_TABLE_BIR
#define RTE_PCI_MSIX_TABLE_BIR PCI_MSIX_TABLE_BIR
+#else
+#define RTE_PCI_MSIX_TABLE_BIR 0x7
+#endif
+
+#ifdef PCI_MSIX_TABLE_OFFSET
#define RTE_PCI_MSIX_TABLE_OFFSET PCI_MSIX_TABLE_OFFSET
+#else
+#define RTE_PCI_MSIX_TABLE_OFFSET 0xfffffff8
+#endif
+
+#ifdef PCI_MSIX_FLAGS_QSIZE
#define RTE_PCI_MSIX_FLAGS_QSIZE PCI_MSIX_FLAGS_QSIZE
+#else
+#define RTE_PCI_MSIX_FLAGS_QSIZE 0x07ff
#endif
/* access config space */
diff --git a/drivers/bus/pci/linux/pci_uio.c b/drivers/bus/pci/linux/pci_uio.c
index 92b7f027..d423e4bb 100644
--- a/drivers/bus/pci/linux/pci_uio.c
+++ b/drivers/bus/pci/linux/pci_uio.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <string.h>
diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c
index 1f93fa4d..aeeaa9ed 100644
--- a/drivers/bus/pci/linux/pci_vfio.c
+++ b/drivers/bus/pci/linux/pci_vfio.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <string.h>
@@ -459,7 +430,6 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev)
struct pci_map *maps;
dev->intr_handle.fd = -1;
- dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
/* store PCI address string */
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
@@ -576,7 +546,6 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
struct pci_map *maps;
dev->intr_handle.fd = -1;
- dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
/* store PCI address string */
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
diff --git a/drivers/bus/pci/meson.build b/drivers/bus/pci/meson.build
new file mode 100644
index 00000000..12756a4d
--- /dev/null
+++ b/drivers/bus/pci/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+deps += ['pci']
+install_headers('rte_bus_pci.h')
+sources = files('pci_common.c', 'pci_common_uio.c')
+if host_machine.system() == 'linux'
+ sources += files('linux/pci.c',
+ 'linux/pci_uio.c',
+ 'linux/pci_vfio.c')
+ includes += include_directories('linux')
+ cflags += ['-D_GNU_SOURCE']
+else
+ sources += files('bsd/pci.c')
+ includes += include_directories('bsd')
+endif
diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c
index 104fdf90..2a00f365 100644
--- a/drivers/bus/pci/pci_common.c
+++ b/drivers/bus/pci/pci_common.c
@@ -1,35 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * Copyright 2013-2014 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright 2013-2014 6WIND S.A.
*/
#include <string.h>
diff --git a/drivers/bus/pci/pci_common_uio.c b/drivers/bus/pci/pci_common_uio.c
index 06711316..54bc20b5 100644
--- a/drivers/bus/pci/pci_common_uio.c
+++ b/drivers/bus/pci/pci_common_uio.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <fcntl.h>
@@ -119,7 +90,6 @@ pci_uio_map_resource(struct rte_pci_device *dev)
dev->intr_handle.fd = -1;
dev->intr_handle.uio_cfg_fd = -1;
- dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
/* secondary processes - use already recorded details */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
diff --git a/drivers/bus/pci/private.h b/drivers/bus/pci/private.h
index 2283f093..88fa587e 100644
--- a/drivers/bus/pci/private.h
+++ b/drivers/bus/pci/private.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 6WIND. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 6WIND S.A.
*/
#ifndef _PCI_PRIVATE_H_
diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h
index d4a29964..357afb91 100644
--- a/drivers/bus/pci/rte_bus_pci.h
+++ b/drivers/bus/pci/rte_bus_pci.h
@@ -1,35 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * Copyright 2013-2014 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation.
+ * Copyright 2013-2014 6WIND S.A.
*/
#ifndef _RTE_BUS_PCI_H_
diff --git a/drivers/bus/vdev/Makefile b/drivers/bus/vdev/Makefile
index 84bd7244..24d424a3 100644
--- a/drivers/bus/vdev/Makefile
+++ b/drivers/bus/vdev/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/bus/vdev/meson.build b/drivers/bus/vdev/meson.build
new file mode 100644
index 00000000..03cb87eb
--- /dev/null
+++ b/drivers/bus/vdev/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('vdev.c')
+install_headers('rte_bus_vdev.h')
diff --git a/drivers/bus/vdev/rte_bus_vdev.h b/drivers/bus/vdev/rte_bus_vdev.h
index 41762b85..f9d8a238 100644
--- a/drivers/bus/vdev/rte_bus_vdev.h
+++ b/drivers/bus/vdev/rte_bus_vdev.h
@@ -124,6 +124,41 @@ RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
#define RTE_PMD_REGISTER_ALIAS(nm, alias)\
static const char *vdrvinit_ ## nm ## _alias = RTE_STR(alias)
+typedef void (*rte_vdev_scan_callback)(void *user_arg);
+
+/**
+ * Register a callback to be invoked during vdev scan,
+ * before the devargs list is read.
+ *
+ * This function cannot be called from within a scan callback,
+ * as it would deadlock on the scan lock.
+ *
+ * @param callback
+ * The function to be called, which may update the devargs list.
+ * @param user_arg
+ * An opaque pointer passed to callback.
+ * @return
+ * 0 on success, negative on error
+ */
+int
+rte_vdev_add_custom_scan(rte_vdev_scan_callback callback, void *user_arg);
+
+/**
+ * Remove a registered scan callback.
+ *
+ * This function cannot be called from within a scan callback,
+ * as it would deadlock on the scan lock.
+ *
+ * @param callback
+ * The registered function to be removed.
+ * @param user_arg
+ * The associated opaque pointer, or (void *)-1 to match any.
+ * @return
+ * 0 on success
+ */
+int
+rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg);
+
/**
* Initialize a driver specified by name.
*
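The pair of declarations above lets an application inject virtual devices at scan time. A minimal usage sketch, assuming an application-defined callback and device string (rte_eal_devargs_add() is one way to extend the devargs list in this release; the helper an application actually uses is its own choice, and my_scan_cb/setup_custom_scan are illustrative names):

    /* hypothetical callback: adds one extra vdev before the scan runs */
    static void
    my_scan_cb(void *user_arg)
    {
        const char *devstr = user_arg;    /* e.g. "net_ring0" */

        rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, devstr);
    }

    static void
    setup_custom_scan(void)
    {
        rte_vdev_add_custom_scan(my_scan_cb, (void *)"net_ring0");
        /* ... bus scan runs, callback fires ... */
        rte_vdev_remove_custom_scan(my_scan_cb, (void *)-1); /* -1 matches any arg */
    }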
diff --git a/drivers/bus/vdev/rte_bus_vdev_version.map b/drivers/bus/vdev/rte_bus_vdev_version.map
index 707b870c..590cf9b4 100644
--- a/drivers/bus/vdev/rte_bus_vdev_version.map
+++ b/drivers/bus/vdev/rte_bus_vdev_version.map
@@ -8,3 +8,11 @@ DPDK_17.11 {
local: *;
};
+
+DPDK_18.02 {
+ global:
+
+ rte_vdev_add_custom_scan;
+ rte_vdev_remove_custom_scan;
+
+} DPDK_17.11;
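The map file change follows the usual DPDK symbol-versioning pattern: the two new functions are exported under a DPDK_18.02 node that inherits from DPDK_17.11, so binaries linked against the 17.11 ABI are unaffected.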
diff --git a/drivers/bus/vdev/vdev.c b/drivers/bus/vdev/vdev.c
index fd7736d6..e4bc7246 100644
--- a/drivers/bus/vdev/vdev.c
+++ b/drivers/bus/vdev/vdev.c
@@ -44,6 +44,8 @@
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_spinlock.h>
#include <rte_errno.h>
#include "rte_bus_vdev.h"
@@ -62,6 +64,16 @@ static struct vdev_device_list vdev_device_list =
struct vdev_driver_list vdev_driver_list =
TAILQ_HEAD_INITIALIZER(vdev_driver_list);
+struct vdev_custom_scan {
+ TAILQ_ENTRY(vdev_custom_scan) next;
+ rte_vdev_scan_callback callback;
+ void *user_arg;
+};
+TAILQ_HEAD(vdev_custom_scans, vdev_custom_scan);
+static struct vdev_custom_scans vdev_custom_scans =
+ TAILQ_HEAD_INITIALIZER(vdev_custom_scans);
+static rte_spinlock_t vdev_custom_scan_lock = RTE_SPINLOCK_INITIALIZER;
+
/* register a driver */
void
rte_vdev_register(struct rte_vdev_driver *driver)
@@ -76,6 +88,53 @@ rte_vdev_unregister(struct rte_vdev_driver *driver)
TAILQ_REMOVE(&vdev_driver_list, driver, next);
}
+int
+rte_vdev_add_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
+{
+ struct vdev_custom_scan *custom_scan;
+
+ rte_spinlock_lock(&vdev_custom_scan_lock);
+
+ /* check if already registered */
+ TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
+ if (custom_scan->callback == callback &&
+ custom_scan->user_arg == user_arg)
+ break;
+ }
+
+ if (custom_scan == NULL) {
+ custom_scan = malloc(sizeof(struct vdev_custom_scan));
+ if (custom_scan != NULL) {
+ custom_scan->callback = callback;
+ custom_scan->user_arg = user_arg;
+ TAILQ_INSERT_TAIL(&vdev_custom_scans, custom_scan, next);
+ }
+ }
+
+ rte_spinlock_unlock(&vdev_custom_scan_lock);
+
+ return (custom_scan == NULL) ? -1 : 0;
+}
+
+int
+rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
+{
+ struct vdev_custom_scan *custom_scan, *tmp_scan;
+
+ rte_spinlock_lock(&vdev_custom_scan_lock);
+ TAILQ_FOREACH_SAFE(custom_scan, &vdev_custom_scans, next, tmp_scan) {
+ if (custom_scan->callback != callback ||
+ (custom_scan->user_arg != (void *)-1 &&
+ custom_scan->user_arg != user_arg))
+ continue;
+ TAILQ_REMOVE(&vdev_custom_scans, custom_scan, next);
+ free(custom_scan);
+ }
+ rte_spinlock_unlock(&vdev_custom_scan_lock);
+
+ return 0;
+}
+
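Three behaviors of the pair above are worth noting: re-registering an identical (callback, user_arg) tuple is treated as success without adding a duplicate entry; the list node is malloc'd while the spinlock is held, which is acceptable on this slow path; and passing user_arg == (void *)-1 to the remove function acts as a wildcard, dropping every registration of that callback.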
static int
vdev_parse(const char *name, void *addr)
{
@@ -260,6 +319,22 @@ vdev_scan(void)
{
struct rte_vdev_device *dev;
struct rte_devargs *devargs;
+ struct vdev_custom_scan *custom_scan;
+
+ /* call custom scan callbacks if any */
+ rte_spinlock_lock(&vdev_custom_scan_lock);
+ TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
+ if (custom_scan->callback != NULL)
+ /*
+ * the callback should update the devargs list
+ * by calling rte_eal_devargs_insert() with
+ * devargs.bus = rte_bus_find_by_name("vdev");
+ * devargs.type = RTE_DEVTYPE_VIRTUAL;
+ * devargs.policy = RTE_DEV_WHITELISTED;
+ */
+ custom_scan->callback(custom_scan->user_arg);
+ }
+ rte_spinlock_unlock(&vdev_custom_scan_lock);
/* for virtual devices we scan the devargs_list populated via cmdline */
TAILQ_FOREACH(devargs, &devargs_list, next) {
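The callbacks run with vdev_custom_scan_lock held, which is the deadlock the header comments warn about: calling rte_vdev_add_custom_scan() or rte_vdev_remove_custom_scan() from inside a callback would try to retake the non-recursive spinlock.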
@@ -289,6 +364,7 @@ static int
vdev_probe(void)
{
struct rte_vdev_device *dev;
+ int ret = 0;
/* call the init function for each virtual device */
TAILQ_FOREACH(dev, &vdev_device_list, next) {
@@ -299,11 +375,11 @@ vdev_probe(void)
if (vdev_probe_all_drivers(dev)) {
VDEV_LOG(ERR, "failed to initialize %s device\n",
rte_vdev_device_name(dev));
- return -1;
+ ret = -1;
}
}
- return 0;
+ return ret;
}
static struct rte_device *
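With this change, vdev_probe() continues past a device that fails to initialize instead of returning at the first failure, so one bad virtual device no longer blocks the rest; the failure is still reported through the accumulated return value.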
diff --git a/drivers/bus/vdev/vdev_logs.h b/drivers/bus/vdev/vdev_logs.h
index 43b0ab8b..87593741 100644
--- a/drivers/bus/vdev/vdev_logs.h
+++ b/drivers/bus/vdev/vdev_logs.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#ifndef _VDEV_LOGS_H_
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 645b6967..628bd142 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2017 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index ddfec4c6..d06c5444 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016-2017 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index d6a18efc..59e504ee 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#ifndef _AESNI_GCM_OPS_H_
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 08dcacce..83e54480 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_common.h>
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 0f315f00..6f542137 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
*/
#include <string.h>
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 1c8835b5..3d60583b 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
@@ -84,7 +56,7 @@ struct aesni_gcm_qp {
/**< Session Mempool */
uint16_t id;
/**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_LEN];
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
/**< Unique Queue Pair Name */
uint8_t temp_digest[DIGEST_LENGTH_MAX];
/**< Buffer used to store the digest generated
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index a49f06f2..d9f8fb98 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
index 59c3ee1e..4d596e85 100644
--- a/drivers/crypto/aesni_mb/aesni_mb_ops.h
+++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Intel Corporation
*/
#ifndef _AESNI_MB_OPS_H_
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 70043897..636c6c37 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
*/
#include <des.h>
@@ -110,6 +82,15 @@ aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
return AESNI_MB_OP_HASH_CIPHER;
}
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ return AESNI_MB_OP_AEAD_CIPHER_HASH;
+ else
+ return AESNI_MB_OP_AEAD_HASH_CIPHER;
+ }
+ }
+
return AESNI_MB_OP_NOT_SUPPORTED;
}
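In the new AEAD branch only AES-CCM is recognized: an encrypt operation is mapped to the cipher-then-hash job order and a decrypt operation to hash-then-cipher, mirroring the digest-generate/digest-verify split configured in the session setup below.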
@@ -285,6 +266,61 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
return 0;
}
+static int
+aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ aes_keyexp_t aes_keyexp_fn;
+
+ switch (xform->aead.op) {
+ case RTE_CRYPTO_AEAD_OP_ENCRYPT:
+ sess->cipher.direction = ENCRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+ break;
+ case RTE_CRYPTO_AEAD_OP_DECRYPT:
+ sess->cipher.direction = DECRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+ break;
+ default:
+ MB_LOG_ERR("Invalid aead operation parameter");
+ return -EINVAL;
+ }
+
+ switch (xform->aead.algo) {
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ sess->cipher.mode = CCM;
+ sess->auth.algo = AES_CCM;
+ break;
+ default:
+ MB_LOG_ERR("Unsupported aead mode parameter");
+ return -ENOTSUP;
+ }
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->aead.iv.offset;
+ sess->iv.length = xform->aead.iv.length;
+
+ /* Check key length and choose key expansion function for AES */
+
+ switch (xform->aead.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ default:
+ MB_LOG_ERR("Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ /* Expanded cipher keys */
+ (*aes_keyexp_fn)(xform->aead.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+
+ return 0;
+}
+
/** Parse crypto xform chain and set private session parameters */
int
aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
@@ -293,6 +329,7 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
{
const struct rte_crypto_sym_xform *auth_xform = NULL;
const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
int ret;
/* Select Crypto operation - hash then cipher / cipher then hash */
@@ -326,6 +363,18 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
auth_xform = NULL;
cipher_xform = xform;
break;
+ case AESNI_MB_OP_AEAD_CIPHER_HASH:
+ sess->chain_order = CIPHER_HASH;
+ sess->aead.aad_len = xform->aead.aad_length;
+ sess->aead.digest_len = xform->aead.digest_length;
+ aead_xform = xform;
+ break;
+ case AESNI_MB_OP_AEAD_HASH_CIPHER:
+ sess->chain_order = HASH_CIPHER;
+ sess->aead.aad_len = xform->aead.aad_length;
+ sess->aead.digest_len = xform->aead.digest_length;
+ aead_xform = xform;
+ break;
case AESNI_MB_OP_NOT_SUPPORTED:
default:
MB_LOG_ERR("Unsupported operation chain order parameter");
@@ -348,6 +397,15 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
return ret;
}
+ if (aead_xform) {
+ ret = aesni_mb_set_session_aead_parameters(mb_ops, sess,
+ aead_xform);
+ if (ret != 0) {
+ MB_LOG_ERR("Invalid/unsupported aead parameters");
+ return ret;
+ }
+ }
+
return 0;
}
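From the application side, a session that reaches the new AEAD path is created with a single AEAD transform. A minimal sketch, assuming a 16-byte key buffer, an AAD length aad_len, and an IV placed in the per-op private area at IV_OFFSET (all three names are illustrative):

    struct rte_crypto_sym_xform aead_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_AEAD,
        .next = NULL,
        .aead = {
            .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
            .algo = RTE_CRYPTO_AEAD_AES_CCM,
            .key = { .data = key, .length = 16 },        /* AES-128 only here */
            .iv = { .offset = IV_OFFSET, .length = 13 }, /* nonce: 7 to 13 bytes */
            .digest_length = 8,                          /* 4 to 16, even */
            .aad_length = aad_len,
        },
    };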
@@ -462,6 +520,9 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
job->_k1_expanded = session->auth.xcbc.k1_expanded;
job->_k2 = session->auth.xcbc.k2;
job->_k3 = session->auth.xcbc.k3;
+ } else if (job->hash_alg == AES_CCM) {
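+ /* the AAD pointer skips the 16-byte B0 block and 2-byte encoded AAD length */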
+ job->u.CCM.aad = op->sym->aead.aad.data + 18;
+ job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
} else {
job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
job->hashed_auth_key_xor_opad = session->auth.pads.outer;
@@ -485,7 +546,10 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
rte_pktmbuf_data_len(op->sym->m_src));
} else {
m_dst = m_src;
- m_offset = op->sym->cipher.data.offset;
+ if (job->hash_alg == AES_CCM)
+ m_offset = op->sym->aead.data.offset;
+ else
+ m_offset = op->sym->cipher.data.offset;
}
/* Set digest output location */
@@ -494,30 +558,51 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
job->auth_tag_output = qp->temp_digests[*digest_idx];
*digest_idx = (*digest_idx + 1) % MAX_JOBS;
} else {
- job->auth_tag_output = op->sym->auth.digest.data;
+ if (job->hash_alg == AES_CCM)
+ job->auth_tag_output = op->sym->aead.digest.data;
+ else
+ job->auth_tag_output = op->sym->auth.digest.data;
}
/*
* The multi-buffer library currently only supports returning a truncated
* digest length, as specified in the relevant IPsec RFCs
*/
- job->auth_tag_output_len_in_bytes =
- get_truncated_digest_byte_length(job->hash_alg);
+ if (job->hash_alg != AES_CCM)
+ job->auth_tag_output_len_in_bytes =
+ get_truncated_digest_byte_length(job->hash_alg);
+ else
+ job->auth_tag_output_len_in_bytes = session->aead.digest_len;
+
/* Set IV parameters */
- job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- session->iv.offset);
+
job->iv_len_in_bytes = session->iv.length;
/* Data Parameter */
job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
- job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+ if (job->hash_alg == AES_CCM) {
+ job->cipher_start_src_offset_in_bytes =
+ op->sym->aead.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+ job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
- job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
- job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
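+ /* the CCM nonce starts after the counter-block flags byte, hence +1 */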
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset + 1);
+ } else {
+ job->cipher_start_src_offset_in_bytes =
+ op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
+
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+ }
/* Set user data to be crypto operation data struct */
job->user_data = op;
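The +18 and +1 offsets above define what the application must lay out for CCM: 18 spare bytes in front of the real AAD (room for the 16-byte B0 block plus the 2-byte AAD length encoding) and one spare byte in front of the nonce (the counter-block flags byte). A sketch of preparing an operation accordingly, with nonce, aad and iv_offset as placeholder names:

    uint8_t *iv = rte_crypto_op_ctod_offset(op, uint8_t *, iv_offset);

    /* byte 0 is left for the CCM flags byte; the nonce follows it */
    memcpy(iv + 1, nonce, nonce_len);

    /* the real AAD starts 18 bytes into the AAD buffer */
    memcpy(op->sym->aead.aad.data + 18, aad, aad_len);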
@@ -529,9 +614,15 @@ static inline void
verify_digest(struct aesni_mb_qp *qp __rte_unused, JOB_AES_HMAC *job,
struct rte_crypto_op *op) {
/* Verify digest if required */
- if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
- job->auth_tag_output_len_in_bytes) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ if (job->hash_alg == AES_CCM) {
+ if (memcmp(job->auth_tag_output, op->sym->aead.digest.data,
+ job->auth_tag_output_len_in_bytes) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+ job->auth_tag_output_len_in_bytes) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
}
/**
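The two branches of verify_digest() differ only in where the reference digest is read from (op->sym->aead.digest.data for CCM, op->sym->auth.digest.data otherwise); the comparison against the computed tag and the AUTH_FAILED handling are identical.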
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3b3ef0c0..9d685a09 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
*/
#include <string.h>
@@ -287,7 +259,36 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
-
+ { /* AES CCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 16,
+ .increment = 2
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 46,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 7,
+ .max = 13,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
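The advertised ranges track the CCM definition: a fixed 16-byte key (AES-128, matching the key-length switch in the session setup above), a 4 to 16 byte digest in steps of 2, and a 7 to 13 byte IV carrying the nonce; AAD is capped at 46 bytes in this implementation.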
@@ -409,7 +410,7 @@ aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
const char *str, unsigned int ring_size, int socket_id)
{
struct rte_ring *r;
- char ring_name[RTE_CRYPTODEV_NAME_LEN];
+ char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
unsigned int n = snprintf(ring_name, sizeof(ring_name),
"%s_%s",
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index fe3bd730..948e091c 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2016 Intel Corporation
*/
#ifndef _RTE_AESNI_MB_PMD_PRIVATE_H_
@@ -71,6 +43,7 @@ static const unsigned auth_blocksize[] = {
[SHA_384] = 128,
[SHA_512] = 128,
[AES_XCBC] = 16,
+ [AES_CCM] = 16,
};
/**
@@ -93,6 +66,7 @@ static const unsigned auth_truncated_digest_byte_lengths[] = {
[SHA_384] = 24,
[SHA_512] = 32,
[AES_XCBC] = 12,
+ [AES_CCM] = 8,
[NULL_HASH] = 0
};
@@ -137,6 +111,8 @@ enum aesni_mb_operation {
AESNI_MB_OP_CIPHER_HASH,
AESNI_MB_OP_HASH_ONLY,
AESNI_MB_OP_CIPHER_ONLY,
+ AESNI_MB_OP_AEAD_HASH_CIPHER,
+ AESNI_MB_OP_AEAD_CIPHER_HASH,
AESNI_MB_OP_NOT_SUPPORTED
};
@@ -154,7 +130,7 @@ struct aesni_mb_private {
struct aesni_mb_qp {
uint16_t id;
/**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_LEN];
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
/**< Unique Queue Pair Name */
const struct aesni_mb_op_fns *op_fns;
/**< Vector mode dependent pointer table of the multi-buffer APIs */
@@ -238,6 +214,12 @@ struct aesni_mb_session {
/**< Expanded XCBC authentication keys */
};
} auth;
+ struct {
+ /** AAD data length */
+ uint16_t aad_len;
+ /** digest size */
+ uint16_t digest_len;
+ } aead;
} __rte_cache_aligned;
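The per-session AEAD state is deliberately small: only the AAD and digest lengths are cached, since CCM reuses the session's existing cipher key schedule and IV bookkeeping.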
diff --git a/drivers/crypto/armv8/Makefile b/drivers/crypto/armv8/Makefile
index 79c260ff..e862af72 100644
--- a/drivers/crypto/armv8/Makefile
+++ b/drivers/crypto/armv8/Makefile
@@ -1,33 +1,5 @@
-#
-# BSD LICENSE
-#
-# Copyright (C) Cavium, Inc. 2017.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Cavium, Inc nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
#
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index 97719f27..59fffcf1 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#include <stdbool.h>
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
index 63776b26..3817ad7b 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#include <string.h>
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h
index fa31f0a0..b8966e93 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_private.h
+++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef _RTE_ARMV8_PMD_PRIVATE_H_
@@ -148,7 +120,7 @@ struct armv8_crypto_qp {
/**< Session Mempool */
struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
- char name[RTE_CRYPTODEV_NAME_LEN];
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
/**< Unique Queue Pair Name */
uint8_t temp_digest[DIGEST_LENGTH_MAX];
/**< Buffer used to store the digest generated
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index a08f2717..cb6c63e6 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -1,33 +1,7 @@
-# BSD LICENSE
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2016 NXP
#
-# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-# Copyright 2016 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Freescale Semiconductor, Inc nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -43,6 +17,7 @@ endif
LIB = librte_pmd_dpaa2_sec.a
# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT),y)
CFLAGS += -O0 -g
CFLAGS += "-Wno-error"
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 9c64c5d9..9a790ddd 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <time.h>
@@ -74,6 +48,7 @@
#define FLE_POOL_NUM_BUFS 32000
#define FLE_POOL_BUF_SIZE 256
#define FLE_POOL_CACHE_SIZE 512
+#define FLE_SG_MEM_SIZE 2048
#define SEC_FLC_DHR_OUTBOUND -114
#define SEC_FLC_DHR_INBOUND 0
@@ -112,6 +87,153 @@ build_proto_fd(dpaa2_sec_session *sess,
}
static inline int
+build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+ int icv_len = sess->digest_length;
+ uint8_t *old_icv;
+ struct rte_mbuf *mbuf;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ /* first FLE entry used to store mbuf and session ctxt */
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ RTE_LOG(ERR, PMD, "GCM SG: Memory alloc failed for SGE\n");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ PMD_TX_LOG(DEBUG, "GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "iv-len=%d data_off: 0x%x\n",
+ sym_op->aead.data.offset,
+ sym_op->aead.data.length,
+ sess->digest_length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+ op_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + icv_len + auth_only_len) :
+ sym_op->aead.data.length + auth_only_len;
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
+ auth_only_len);
+ sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+ sge->length = icv_len;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ mbuf = sym_op->m_src;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ ip_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len +
+ icv_len);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ if (auth_only_len) {
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+ sge->length = auth_only_len;
+ sge++;
+ }
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->aead.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
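+ /* Decryption: copy the received ICV into scratch space after the
+ * SG table and append it as the last input segment so SEC can
+ * verify it.
+ */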
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->aead.digest.data, icv_len);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = icv_len;
+ }
+
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+ return 0;
+}
+
+static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
@@ -142,7 +264,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
*/
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ RTE_LOG(ERR, PMD, "GCM: Memory alloc failed for SGE\n");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -175,7 +297,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+ PMD_TX_LOG(DEBUG, "GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
"iv-len=%d data_off: 0x%x\n",
sym_op->aead.data.offset,
sym_op->aead.data.length,
@@ -260,6 +382,151 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
}
static inline int
+build_authenc_sg_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
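+ /* Length of the region that is authenticated but not ciphered */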
+ uint32_t auth_only_len = sym_op->auth.data.length -
+ sym_op->cipher.data.length;
+ int icv_len = sess->digest_length;
+ uint8_t *old_icv;
+ struct rte_mbuf *mbuf;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ /* first FLE entry used to store mbuf and session ctxt */
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ RTE_LOG(ERR, PMD, "AUTHENC SG: Memory alloc failed for SGE\n");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ PMD_TX_LOG(DEBUG,
+ "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+ op_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->cipher.data.length + icv_len) :
+ sym_op->cipher.data.length;
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ sge->length = icv_len;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ mbuf = sym_op->m_src;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ ip_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->auth.data.length + sess->iv.length) :
+ (sym_op->auth.data.length + sess->iv.length +
+ icv_len);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
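+ /* Decryption: stash the received digest after the SG table and
+ * feed it as the final input segment for verification.
+ */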
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->auth.digest.data, icv_len);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = icv_len;
+ }
+
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+ return 0;
+}
+
+static inline int
build_authenc_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
@@ -324,7 +591,7 @@ build_authenc_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+ PMD_TX_LOG(DEBUG, "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
sym_op->auth.data.offset,
sym_op->auth.data.length,
@@ -400,6 +667,86 @@ build_authenc_fd(dpaa2_sec_session *sess,
return 0;
}
+static inline int
+build_auth_sg_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd,
+ __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *old_digest;
+ struct rte_mbuf *mbuf;
+
+ PMD_INIT_FUNC_TRACE();
+
+ mbuf = sym_op->m_src;
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ RTE_LOG(ERR, PMD, "AUTH SG: Memory alloc failed for SGE\n");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ /* first FLE entry used to store mbuf and session ctxt */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+ /* Compound FD pointing at the frame list */
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+
+ /* o/p fle */
+ DPAA2_SET_FLE_ADDR(op_fle,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ op_fle->length = sess->digest_length;
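+ /* Digest buffer is contiguous, so the output FLE needs no SG extension */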
+
+ /* i/p fle */
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ /* i/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ /* i/p segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ if (sess->dir == DIR_ENC) {
+ /* Digest calculation case */
+ sge->length -= sess->digest_length;
+ ip_fle->length = sym_op->auth.data.length;
+ } else {
+ /* Digest verification case */
+ sge++;
+ old_digest = (uint8_t *)(sge + 1);
+ rte_memcpy(old_digest, sym_op->auth.digest.data,
+ sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+ sge->length = sess->digest_length;
+ ip_fle->length = sym_op->auth.data.length +
+ sess->digest_length;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+ return 0;
+}
+
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
@@ -415,7 +762,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ RTE_LOG(ERR, PMD, "AUTH Memory alloc failed for SGE\n");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -491,6 +838,123 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
}
static int
+build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct rte_mbuf *mbuf;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (!fle) {
+ RTE_LOG(ERR, PMD, "CIPHER SG: Memory alloc failed for SGE\n");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ /* first FLE entry used to store mbuf and session ctxt */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ flc = &priv->flc_desc[0].flc;
+
+ PMD_TX_LOG(DEBUG,
+ "CIPHER SG: cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* o/p fle */
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+ op_fle->length = sym_op->cipher.data.length;
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+
+ /* o/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ PMD_TX_LOG(DEBUG,
+ "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
+
+ /* i/p fle */
+ mbuf = sym_op->m_src;
+ sge++;
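+ /* Input frame list: IV segment first, then the payload segments */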
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+ /* i/p IV */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sess->iv.length;
+
+ sge++;
+
+ /* i/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(ip_fle);
+
+ /* Compound FD pointing at the frame list */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ PMD_TX_LOG(DEBUG,
+ "CIPHER SG: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+ (void *)DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+ return 0;
+}
+
+static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
{
@@ -512,7 +976,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ RTE_LOG(ERR, PMD, "CIPHER: Memory alloc failed for SGE\n");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -548,7 +1012,8 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
+ PMD_TX_LOG(DEBUG,
+ "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d, data_off: 0x%x",
sym_op->cipher.data.offset,
sym_op->cipher.data.length,
sess->iv.length,
@@ -560,8 +1025,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
fle->length = sym_op->cipher.data.length + sess->iv.length;
- PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
- flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
+ PMD_TX_LOG(DEBUG,
+ "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
fle++;
@@ -582,7 +1049,8 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FLE_FIN(sge);
DPAA2_SET_FLE_FIN(fle);
- PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+ PMD_TX_LOG(DEBUG,
+ "CIPHER: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
(void *)DPAA2_GET_FD_ADDR(fd),
DPAA2_GET_FD_BPID(fd),
rte_dpaa2_bpid_info[bpid].meta_data_size,
@@ -600,13 +1068,6 @@ build_sec_fd(struct rte_crypto_op *op,
dpaa2_sec_session *sess;
PMD_INIT_FUNC_TRACE();
- /*
- * Segmented buffer is not supported.
- */
- if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
- op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return -ENOTSUP;
- }
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
sess = (dpaa2_sec_session *)get_session_private_data(
@@ -617,25 +1078,46 @@ build_sec_fd(struct rte_crypto_op *op,
else
return -1;
- switch (sess->ctxt_type) {
- case DPAA2_SEC_CIPHER:
- ret = build_cipher_fd(sess, op, fd, bpid);
- break;
- case DPAA2_SEC_AUTH:
- ret = build_auth_fd(sess, op, fd, bpid);
- break;
- case DPAA2_SEC_AEAD:
- ret = build_authenc_gcm_fd(sess, op, fd, bpid);
- break;
- case DPAA2_SEC_CIPHER_HASH:
- ret = build_authenc_fd(sess, op, fd, bpid);
- break;
- case DPAA2_SEC_IPSEC:
- ret = build_proto_fd(sess, op, fd, bpid);
- break;
- case DPAA2_SEC_HASH_CIPHER:
- default:
- RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ /* Segmented (multi-mbuf) buffer: use the scatter-gather FD builders */
+ if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_sg_fd(sess, op, fd, bpid);
+ break;
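+ /* Note: no IPSEC case in the SG path - protocol offload with
+ * segmented buffers lands in the unsupported-session error below.
+ */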
+ case DPAA2_SEC_HASH_CIPHER:
+ default:
+ RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ }
+ } else {
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_IPSEC:
+ ret = build_proto_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_HASH_CIPHER:
+ default:
+ RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ }
}
return ret;
}
@@ -699,6 +1181,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
while (loop < frames_to_send) {
loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
&fd_arr[loop],
+ NULL, /* no per-frame enqueue flags */
frames_to_send - loop);
}
@@ -792,8 +1275,11 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
DPAA2_GET_FD_LEN(fd));
/* free the fle memory */
- priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
- rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
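+ /* SG FDs get their FLE table from rte_malloc, contiguous ones
+ * from the per-session FLE mempool - release accordingly.
+ */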
+ if (likely(rte_pktmbuf_is_contiguous(src))) {
+ priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+ rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
+ } else {
+ rte_free((void *)(fle - 1));
+ }
return op;
}
@@ -1309,7 +1795,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AEAD_AES_GCM:
aeaddata.algtype = OP_ALG_ALGSEL_AES;
aeaddata.algmode = OP_ALG_AAI_GCM;
- session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
@@ -2278,7 +2764,8 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_SECURITY;
+ RTE_CRYPTODEV_FF_SECURITY |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
internals = cryptodev->data->dev_private;
internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
@@ -2420,6 +2907,7 @@ cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
}
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
+ .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
.drv_type = DPAA2_CRYPTO,
.driver = {
.name = "DPAA2 SEC PMD"
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
index b5e99f80..23251141 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DPAA2_SEC_LOGS_H_
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 14e71df5..e8ac95ba 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
@@ -169,6 +143,7 @@ typedef struct dpaa2_sec_session_entry {
uint8_t dir; /*!< Operation Direction */
enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
union {
struct {
uint8_t *data; /**< pointer to key data */
diff --git a/drivers/crypto/dpaa2_sec/hw/compat.h b/drivers/crypto/dpaa2_sec/hw/compat.h
index b17b27ef..ce946ccb 100644
--- a/drivers/crypto/dpaa2_sec/hw/compat.h
+++ b/drivers/crypto/dpaa2_sec/hw/compat.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_COMPAT_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/desc.h b/drivers/crypto/dpaa2_sec/hw/desc.h
index 0be4439c..e9255832 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
index 5ae3a1ac..91f3e067 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/algo.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DESC_ALGO_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/common.h b/drivers/crypto/dpaa2_sec/hw/desc/common.h
index c2ac99be..98425d8b 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/common.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/common.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DESC_COMMON_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
index cc637361..35cc02a6 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DESC_IPSEC_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta.h b/drivers/crypto/dpaa2_sec/hw/rta.h
index 283ad600..c4bbad0b 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2016 NXP
+ *
*/
#ifndef __RTA_RTA_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
index dc0a3342..8c807aaa 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_FIFO_LOAD_STORE_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
index 96c9f1e0..0c7ea938 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_HEADER_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
index cba65073..546d22e9 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_JUMP_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
index 8e097856..1ec21234 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_KEY_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
index 2e786a20..f3b0dcfc 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_LOAD_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
index 64a84b47..5b28cbab 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_MATH_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
index 3b9ba792..a7ff7c67 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_MOVE_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
index f1ead2da..94f775e2 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_NFIFO_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
index 698ff530..b85760e5 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
@@ -1,46 +1,17 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
+ * Copyright 2016 NXP
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_OPERATION_CMD_H__
#define __RTA_OPERATION_CMD_H__
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 70000)
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+#endif
+
extern enum rta_sec_era rta_sec_era;
static inline int
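
operation_cmd.h additionally gains a warning guard: GCC 7 introduced -Wimplicit-fallthrough, and the inline helpers in this header fall through switch cases on purpose. The guard, restated with a comment, assuming GCC_VERSION is the usual major*10000 + minor*100 + patch version macro:

    #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 70000)
    /* the helpers below rely on deliberate switch fall-through */
    #pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
    #endif
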
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
index f134235a..d9a5b0e5 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_PROTOCOL_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
index 11995aa2..6e666108 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_SEC_RUN_TIME_ASM_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
index 4aea5770..ceb6a871 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_SEQ_IN_OUT_PTR_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
index 00597f30..4f694ac2 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_SIGNATURE_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
index 1b75692e..8b58e544 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2008-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RTA_STORE_CMD_H__
diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c
index 2a216afc..de8ca970 100644
--- a/drivers/crypto/dpaa2_sec/mc/dpseci.c
+++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 4acb595c..12ac005a 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPSECI_H
#define __FSL_DPSECI_H
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
index a100a0ec..26cef0f7 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_DPSECI_CMD_H
#define _FSL_DPSECI_CMD_H
diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile
index 17bc79c6..fe2c5932 100644
--- a/drivers/crypto/dpaa_sec/Makefile
+++ b/drivers/crypto/dpaa_sec/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
-# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-# Copyright 2017 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Freescale Semiconductor, Inc nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2017 NXP
include $(RTE_SDK)/mk/rte.vars.mk
@@ -37,6 +10,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_dpaa_sec.a
# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -D _GNU_SOURCE
ifeq ($(CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT),y)
CFLAGS += -O0 -g
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 16155b1a..18681cf3 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <fcntl.h>
@@ -41,6 +15,7 @@
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
+#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
@@ -69,6 +44,9 @@ static uint8_t cryptodev_driver_id;
static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
@@ -106,6 +84,8 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
ctx->ctx_pool = ses->ctx_pool;
+ ctx->vtop_offset = (uint64_t) ctx
+ - rte_mempool_virt2iova(ctx);
return ctx;
}
@@ -121,7 +101,7 @@ dpaa_mem_vtop(void *vaddr)
for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
if (vaddr_64 >= memseg[i].addr_64 &&
vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
- paddr = memseg[i].phys_addr +
+ paddr = memseg[i].iova +
(vaddr_64 - memseg[i].addr_64);
return (rte_iova_t)paddr;
@@ -130,6 +110,13 @@ dpaa_mem_vtop(void *vaddr)
return (rte_iova_t)(NULL);
}
+/* virtual address conversion when mempool support is available for ctx */
+static inline phys_addr_t
+dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
+{
+ return (uint64_t)vaddr - ctx->vtop_offset;
+}
+
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
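
The new dpaa_mem_vtop_ctx avoids the memseg walk in dpaa_mem_vtop: dpaa_sec_alloc_ctx caches vaddr minus iova of the ctx once, and because a mempool element is physically contiguous that offset holds for any address inside the same ctx. The arithmetic, as a sketch using only calls that appear in this patch:

    /* cached once per allocation in dpaa_sec_alloc_ctx() */
    uint64_t off = (uint64_t)ctx - rte_mempool_virt2iova(ctx);
    /* any pointer into the same ctx object then converts directly */
    phys_addr_t pa = (uint64_t)&ctx->job.sg[2] - off;
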
@@ -137,10 +124,10 @@ dpaa_mem_ptov(rte_iova_t paddr)
int i;
for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (paddr >= memseg[i].phys_addr &&
- (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
+ if (paddr >= memseg[i].iova &&
+ (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
return (void *)(memseg[i].addr_64 +
- (paddr - memseg[i].phys_addr));
+ (paddr - memseg[i].iova));
}
return NULL;
}
@@ -168,15 +155,6 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
/* Clear FQ options */
memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
- flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
- QMAN_FQ_FLAG_TO_DCPORTAL;
-
- ret = qman_create_fq(0, flags, fq_in);
- if (unlikely(ret != 0)) {
- PMD_INIT_LOG(ERR, "qman_create_fq failed");
- return ret;
- }
-
flags = QMAN_INITFQ_FLAG_SCHED;
fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
QM_INITFQ_WE_CONTEXTB;
@@ -188,9 +166,11 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
fq_in->cb.ern = ern_sec_fq_handler;
+ PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
+
ret = qman_init_fq(fq_in, flags, &fq_opts);
if (unlikely(ret != 0))
- PMD_INIT_LOG(ERR, "qman_init_fq failed");
+ PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
return ret;
}
@@ -217,8 +197,19 @@ dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
* sg[1] for input
*/
job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
+ if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ struct qm_sg_entry *sg_out;
+ uint32_t len;
+
+ sg_out = &job->sg[0];
+ hw_sg_to_cpu(sg_out);
+ len = sg_out->length;
+ ctx->op->sym->m_src->pkt_len = len;
+ ctx->op->sym->m_src->data_len = len;
+ }
dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
dpaa_sec_op_ending(ctx);
@@ -282,7 +273,13 @@ static inline int is_aead(dpaa_sec_session *ses)
static inline int is_auth_cipher(dpaa_sec_session *ses)
{
return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
- (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
+ (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
+}
+
+static inline int is_proto_ipsec(dpaa_sec_session *ses)
+{
+ return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}
static inline int is_encode(dpaa_sec_session *ses)
@@ -303,27 +300,39 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
ses->digest_length = 0;
break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
- alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA1_HMAC:
- alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
- alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
- alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
- alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
- alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
alginfo_a->algmode = OP_ALG_AAI_HMAC;
break;
default:
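
Each case in caam_auth_alg, and in caam_cipher_alg below, now chooses between two constant namespaces with the same ternary: OP_PCL_IPSEC_* protocol descriptor IDs for IPsec security sessions, plain OP_ALG_ALGSEL_* selectors otherwise. The shared shape, restated from the SHA1 case:

    alginfo_a->algtype =
            (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
            OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
    alginfo_a->algmode = OP_ALG_AAI_HMAC;
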
@@ -338,15 +347,21 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
case RTE_CRYPTO_CIPHER_NULL:
break;
case RTE_CRYPTO_CIPHER_AES_CBC:
- alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
alginfo_c->algmode = OP_ALG_AAI_CBC;
break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
- alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
alginfo_c->algmode = OP_ALG_AAI_CBC;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
- alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
alginfo_c->algmode = OP_ALG_AAI_CTR;
break;
default:
@@ -374,7 +389,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
uint32_t shared_desc_len = 0;
- struct sec_cdb *cdb = &ses->qp->cdb;
+ struct sec_cdb *cdb = &ses->cdb;
int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
int swap = false;
@@ -492,14 +507,28 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
cdb->sh_desc[0] = 0;
cdb->sh_desc[1] = 0;
cdb->sh_desc[2] = 0;
-
- /* Auth_only_len is set as 0 here and it will be overwritten
- * in fd for each packet.
- */
- shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
- true, swap, &alginfo_c, &alginfo_a,
- ses->iv.length, 0,
- ses->digest_length, ses->dir);
+ if (is_proto_ipsec(ses)) {
+ if (ses->dir == DIR_ENC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+ cdb->sh_desc,
+ true, swap, &ses->encap_pdb,
+ (uint8_t *)&ses->ip4_hdr,
+ &alginfo_c, &alginfo_a);
+ } else if (ses->dir == DIR_DEC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+ cdb->sh_desc,
+ true, swap, &ses->decap_pdb,
+ &alginfo_c, &alginfo_a);
+ }
+ } else {
+		/* Auth_only_len is set as 0 here and will be
+		 * overwritten in the fd for each packet.
+ */
+ shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+ true, swap, &alginfo_c, &alginfo_a,
+ ses->iv.length, 0,
+ ses->digest_length, ses->dir);
+ }
}
cdb->sh_hdr.hi.field.idlen = shared_desc_len;
cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
@@ -508,46 +537,146 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
return 0;
}
-static inline unsigned int
-dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
+/* qp is lockless, should be accessed by only one thread */
+static int
+dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
+ struct qman_fq *fq;
unsigned int pkts = 0;
int ret;
- struct qm_mcr_queryfq_np np;
- enum qman_fq_state state;
- uint32_t flags;
- uint32_t vdqcr;
-
- qman_query_fq_np(fq, &np);
- if (np.frm_cnt) {
- vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
- if (exact)
- vdqcr |= QM_VDQCR_EXACT;
- ret = qman_volatile_dequeue(fq, 0, vdqcr);
- if (ret)
- return 0;
- do {
- pkts += qman_poll_dqrr(len);
- qman_fq_state(fq, &state, &flags);
- } while (flags & QMAN_FQ_STATE_VDQCR);
- }
+ struct qm_dqrr_entry *dq;
+
+ fq = &qp->outq;
+ ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
+ DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
+ if (ret)
+ return 0;
+
+ do {
+ const struct qm_fd *fd;
+ struct dpaa_sec_job *job;
+ struct dpaa_sec_op_ctx *ctx;
+ struct rte_crypto_op *op;
+
+ dq = qman_dequeue(fq);
+ if (!dq)
+ continue;
+
+ fd = &dq->fd;
+ /* sg is embedded in an op ctx,
+ * sg[0] is for output
+ * sg[1] for input
+ */
+ job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+ ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+ ctx->fd_status = fd->status;
+ op = ctx->op;
+ if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ struct qm_sg_entry *sg_out;
+ uint32_t len;
+
+ sg_out = &job->sg[0];
+ hw_sg_to_cpu(sg_out);
+ len = sg_out->length;
+ op->sym->m_src->pkt_len = len;
+ op->sym->m_src->data_len = len;
+ }
+ if (!ctx->fd_status) {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ printf("\nSEC return err: 0x%x", ctx->fd_status);
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ ops[pkts++] = op;
+
+		/* report op status to sym->op and then free the ctx memory */
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+ qman_dqrr_consume(fq, dq);
+ } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
return pkts;
}
-/* qp is lockless, should be accessed by only one thread */
-static int
-dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
+static inline struct dpaa_sec_job *
+build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
- struct qman_fq *fq;
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ phys_addr_t start_addr;
+ uint8_t *old_digest, extra_segs;
- fq = &qp->outq;
- dpaa_sec_op_nb = 0;
- dpaa_sec_ops = ops;
+ if (is_decode(ses))
+ extra_segs = 3;
+ else
+ extra_segs = 2;
- if (unlikely(nb_ops > DPAA_SEC_BURST))
- nb_ops = DPAA_SEC_BURST;
+ if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+ PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
- return dpaa_volatile_deq(fq, nb_ops, 1);
+ cf = &ctx->job;
+ ctx->op = op;
+ old_digest = ctx->digest;
+
+ /* output */
+ out_sg = &cf->sg[0];
+ qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
+ out_sg->length = ses->digest_length;
+ cpu_to_hw_sg(out_sg);
+
+ /* input */
+ in_sg = &cf->sg[1];
+ /* need to extend the input to a compound frame */
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ in_sg->length = sym->auth.data.length;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+
+ /* 1st seg */
+ sg = in_sg + 1;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ /* Digest verification case */
+ cpu_to_hw_sg(sg);
+ sg++;
+ rte_memcpy(old_digest, sym->auth.digest.data,
+ ses->digest_length);
+ start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+ qm_sg_entry_set64(sg, start_addr);
+ sg->length = ses->digest_length;
+ in_sg->length += ses->digest_length;
+ } else {
+ /* Digest calculation case */
+ sg->length -= ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ cpu_to_hw_sg(in_sg);
+
+ return cf;
}
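
All of the *_sg builders added from here on share the compound-frame convention the in-code comments describe: sg[0] of the job is the output frame, sg[1] the input frame, and multi-segment frames chain through the extension bit into sg[2] onward. A condensed skeleton of that shared shape, names taken from this file (not a complete builder):

    struct dpaa_sec_job *cf = &ctx->job;
    struct qm_sg_entry *out_sg = &cf->sg[0];   /* output frame */
    struct qm_sg_entry *in_sg  = &cf->sg[1];   /* input frame  */
    in_sg->extension = 1;                      /* chain to sg[2..] */
    qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
    cpu_to_hw_sg(in_sg);                       /* byte-swap for hardware */
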
/**
@@ -589,7 +718,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
if (is_decode(ses)) {
/* need to extend the input to a compound frame */
sg->extension = 1;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
sg->length = sym->auth.data.length + ses->digest_length;
sg->final = 1;
cpu_to_hw_sg(sg);
@@ -603,7 +732,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
cpu_to_hw_sg(sg);
/* let's check digest by hw */
- start_addr = dpaa_mem_vtop(old_digest);
+ start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
sg++;
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
@@ -620,6 +749,101 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
}
static inline struct dpaa_sec_job *
+build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 3;
+ }
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ out_sg->length = sym->cipher.data.length;
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->cipher.data.offset;
+ sg->offset = sym->cipher.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ in_sg->length = sym->cipher.data.length + ses->iv.length;
+
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 1st seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->cipher.data.offset;
+ sg->offset = sym->cipher.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
struct rte_crypto_sym_op *sym = op->sym;
@@ -657,7 +881,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg->extension = 1;
sg->final = 1;
sg->length = sym->cipher.data.length + ses->iv.length;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
cpu_to_hw_sg(sg);
sg = &cf->sg[2];
@@ -675,6 +899,145 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
}
static inline struct dpaa_sec_job *
+build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 4;
+ }
+
+ if (ses->auth_only_len)
+ req_segs++;
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ rte_prefetch0(cf->sg);
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ if (is_encode(ses))
+ out_sg->length = sym->aead.data.length + ses->auth_only_len
+ + ses->digest_length;
+ else
+ out_sg->length = sym->aead.data.length + ses->auth_only_len;
+
+ /* output sg entries */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->aead.data.offset +
+ ses->auth_only_len;
+ sg->offset = sym->aead.data.offset - ses->auth_only_len;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->length -= ses->digest_length;
+
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ if (is_encode(ses))
+ in_sg->length = ses->iv.length + sym->aead.data.length
+ + ses->auth_only_len;
+ else
+ in_sg->length = ses->iv.length + sym->aead.data.length
+ + ses->auth_only_len + ses->digest_length;
+
+ /* input sg entries */
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* 1st seg IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 2nd seg auth only */
+ if (ses->auth_only_len) {
+ sg++;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ cpu_to_hw_sg(sg);
+ }
+
+ /* 3rd seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->aead.data.offset;
+ sg->offset = sym->aead.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ memcpy(ctx->digest, sym->aead.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
struct rte_crypto_sym_op *sym = op->sym;
@@ -703,7 +1066,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
@@ -748,7 +1111,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -762,7 +1125,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
qm_sg_entry_set64(sg,
dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
sg->length = sym->aead.data.length + ses->auth_only_len;
@@ -787,6 +1150,132 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
}
static inline struct dpaa_sec_job *
+build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 4;
+ }
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ rte_prefetch0(cf->sg);
+
+ /* output */
+ out_sg = &cf->sg[0];
+ out_sg->extension = 1;
+ if (is_encode(ses))
+ out_sg->length = sym->auth.data.length + ses->digest_length;
+ else
+ out_sg->length = sym->auth.data.length;
+
+ /* output sg entries */
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ cpu_to_hw_sg(out_sg);
+
+ /* 1st seg */
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sg->length -= ses->digest_length;
+
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ mbuf = sym->m_src;
+ in_sg = &cf->sg[1];
+ in_sg->extension = 1;
+ in_sg->final = 1;
+ if (is_encode(ses))
+ in_sg->length = ses->iv.length + sym->auth.data.length;
+ else
+ in_sg->length = ses->iv.length + sym->auth.data.length
+ + ses->digest_length;
+
+ /* input sg entries */
+ sg++;
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ cpu_to_hw_sg(in_sg);
+
+ /* 1st seg IV */
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* 2nd seg */
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len - sym->auth.data.offset;
+ sg->offset = sym->auth.data.offset;
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+ sg->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ sg->length -= ses->digest_length;
+ if (is_decode(ses)) {
+ cpu_to_hw_sg(sg);
+ sg++;
+ memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ sg->length = ses->digest_length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
struct rte_crypto_sym_op *sym = op->sym;
@@ -814,7 +1303,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
@@ -844,7 +1333,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -858,7 +1347,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
length = sg->length;
@@ -881,64 +1370,43 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
return cf;
}
-static int
-dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
+static inline struct dpaa_sec_job *
+build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
+ struct rte_crypto_sym_op *sym = op->sym;
struct dpaa_sec_job *cf;
- dpaa_sec_session *ses;
- struct qm_fd fd;
- int ret;
- uint32_t auth_only_len = op->sym->auth.data.length -
- op->sym->cipher.data.length;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ phys_addr_t src_start_addr, dst_start_addr;
- ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
- cryptodev_driver_id);
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+ cf = &ctx->job;
+ ctx->op = op;
- if (unlikely(!qp->ses || qp->ses != ses)) {
- qp->ses = ses;
- ses->qp = qp;
- ret = dpaa_sec_prep_cdb(ses);
- if (ret)
- return ret;
- }
+ src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
- /*
- * Segmented buffer is not supported.
- */
- if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
- op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return -ENOTSUP;
- }
- if (is_auth_only(ses)) {
- cf = build_auth_only(op, ses);
- } else if (is_cipher_only(ses)) {
- cf = build_cipher_only(op, ses);
- } else if (is_aead(ses)) {
- cf = build_cipher_auth_gcm(op, ses);
- auth_only_len = ses->auth_only_len;
- } else if (is_auth_cipher(ses)) {
- cf = build_cipher_auth(op, ses);
- } else {
- PMD_TX_LOG(ERR, "not supported sec op");
- return -ENOTSUP;
- }
- if (unlikely(!cf))
- return -ENOMEM;
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
- memset(&fd, 0, sizeof(struct qm_fd));
- qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
- fd._format1 = qm_fd_compound;
- fd.length29 = 2 * sizeof(struct qm_sg_entry);
- /* Auth_only_len is set as 0 in descriptor and it is overwritten
- * here in the fd.cmd which will update the DPOVRD reg.
- */
- if (auth_only_len)
- fd.cmd = 0x80000000 | auth_only_len;
- do {
- ret = qman_enqueue(&qp->inq, &fd, 0);
- } while (ret != 0);
+ /* input */
+ sg = &cf->sg[1];
+ qm_sg_entry_set64(sg, src_start_addr);
+ sg->length = sym->m_src->pkt_len;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
- return 0;
+ sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
+ /* output */
+ sg = &cf->sg[0];
+ qm_sg_entry_set64(sg, dst_start_addr);
+ sg->length = sym->m_src->buf_len - sym->m_src->data_off;
+ cpu_to_hw_sg(sg);
+
+ return cf;
}
static uint16_t
@@ -947,23 +1415,120 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
{
/* Function to transmit the frames to given device and queuepair */
uint32_t loop;
- int32_t ret;
struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
uint16_t num_tx = 0;
+ struct qm_fd fds[DPAA_SEC_BURST], *fd;
+ uint32_t frames_to_send;
+ struct rte_crypto_op *op;
+ struct dpaa_sec_job *cf;
+ dpaa_sec_session *ses;
+ struct dpaa_sec_op_ctx *ctx;
+ uint32_t auth_only_len;
+ struct qman_fq *inq[DPAA_SEC_BURST];
+
+ while (nb_ops) {
+ frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
+ DPAA_SEC_BURST : nb_ops;
+ for (loop = 0; loop < frames_to_send; loop++) {
+ op = *(ops++);
+ switch (op->sess_type) {
+ case RTE_CRYPTO_OP_WITH_SESSION:
+ ses = (dpaa_sec_session *)
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ break;
+ case RTE_CRYPTO_OP_SECURITY_SESSION:
+ ses = (dpaa_sec_session *)
+ get_sec_session_private_data(
+ op->sym->sec_session);
+ break;
+ default:
+ PMD_TX_LOG(ERR,
+ "sessionless crypto op not supported");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ if (unlikely(!ses->qp || ses->qp != qp)) {
+ PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
+ ses->qp, qp);
+ if (dpaa_sec_attach_sess_q(qp, ses)) {
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ }
- if (unlikely(nb_ops == 0))
- return 0;
+ auth_only_len = op->sym->auth.data.length -
+ op->sym->cipher.data.length;
+ if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ if (is_auth_only(ses)) {
+ cf = build_auth_only(op, ses);
+ } else if (is_cipher_only(ses)) {
+ cf = build_cipher_only(op, ses);
+ } else if (is_aead(ses)) {
+ cf = build_cipher_auth_gcm(op, ses);
+ auth_only_len = ses->auth_only_len;
+ } else if (is_auth_cipher(ses)) {
+ cf = build_cipher_auth(op, ses);
+ } else if (is_proto_ipsec(ses)) {
+ cf = build_proto(op, ses);
+ } else {
+ PMD_TX_LOG(ERR, "not supported sec op");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ } else {
+ if (is_auth_only(ses)) {
+ cf = build_auth_only_sg(op, ses);
+ } else if (is_cipher_only(ses)) {
+ cf = build_cipher_only_sg(op, ses);
+ } else if (is_aead(ses)) {
+ cf = build_cipher_auth_gcm_sg(op, ses);
+ auth_only_len = ses->auth_only_len;
+ } else if (is_auth_cipher(ses)) {
+ cf = build_cipher_auth_sg(op, ses);
+ } else {
+ PMD_TX_LOG(ERR, "not supported sec op");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ }
+ if (unlikely(!cf)) {
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
- /*Prepare each packet which is to be sent*/
- for (loop = 0; loop < nb_ops; loop++) {
- if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
- PMD_TX_LOG(ERR, "sessionless crypto op not supported");
- return 0;
+ fd = &fds[loop];
+ inq[loop] = ses->inq;
+ fd->opaque_addr = 0;
+ fd->cmd = 0;
+ ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
+ qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
+ fd->_format1 = qm_fd_compound;
+ fd->length29 = 2 * sizeof(struct qm_sg_entry);
+			/* Auth_only_len is set as 0 in the descriptor and is
+			 * overwritten here in fd->cmd, which updates the
+			 * DPOVRD register.
+ */
+ if (auth_only_len)
+ fd->cmd = 0x80000000 | auth_only_len;
+
+ }
+send_pkts:
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+ frames_to_send - loop);
}
- ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
- if (!ret)
- num_tx++;
+ nb_ops -= frames_to_send;
+ num_tx += frames_to_send;
}
+
dpaa_qp->tx_pkts += num_tx;
dpaa_qp->tx_errs += nb_ops - num_tx;
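
From the application's point of view the reworked path is still driven through the standard cryptodev burst API; the PMD simply batches up to DPAA_SEC_BURST frame descriptors per qman_enqueue_multi_fq call. A hypothetical caller-side sketch (dev_id, qp_id and the ops array are assumptions, not taken from this patch):

    uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
    uint16_t done = 0;
    while (done < sent)                /* poll the same qp for completions */
            done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
                                                &ops[done], sent - done);
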
@@ -1151,43 +1716,84 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
return 0;
}
-static int
-dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+static struct qman_fq *
+dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
- dpaa_sec_session *sess = ses;
- struct dpaa_sec_qp *qp;
+ unsigned int i;
- PMD_INIT_FUNC_TRACE();
+ for (i = 0; i < qi->max_nb_sessions; i++) {
+ if (qi->inq_attach[i] == 0) {
+ qi->inq_attach[i] = 1;
+ return &qi->inq[i];
+ }
+ }
+	PMD_DRV_LOG(ERR, "All sec sessions in use %x", qi->max_nb_sessions);
- qp = dev->data->queue_pairs[qp_id];
- if (qp->ses != NULL) {
- PMD_INIT_LOG(ERR, "qp in-use by another session\n");
- return -EBUSY;
+ return NULL;
+}
+
+static int
+dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
+{
+ unsigned int i;
+
+ for (i = 0; i < qi->max_nb_sessions; i++) {
+ if (&qi->inq[i] == fq) {
+ qman_retire_fq(fq, NULL);
+ qman_oos_fq(fq);
+ qi->inq_attach[i] = 0;
+ return 0;
+ }
}
+ return -1;
+}
+
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
+{
+ int ret;
- qp->ses = sess;
sess->qp = qp;
+ ret = dpaa_sec_prep_cdb(sess);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
+ return -1;
+ }
+
+ ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
+ qman_fq_fqid(&qp->outq));
+ if (ret)
+ PMD_DRV_LOG(ERR, "Unable to init sec queue");
+
+ return ret;
+}
- return dpaa_sec_prep_cdb(sess);
+static int
+dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
+ uint16_t qp_id __rte_unused,
+ void *ses __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
}
static int
-dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
+ uint16_t qp_id __rte_unused,
+ void *ses)
{
dpaa_sec_session *sess = ses;
- struct dpaa_sec_qp *qp;
+ struct dpaa_sec_dev_private *qi = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
- qp = dev->data->queue_pairs[qp_id];
- if (qp->ses != NULL) {
- qp->ses = NULL;
- sess->qp = NULL;
- return 0;
- }
+ if (sess->inq)
+ dpaa_sec_detach_rxq(qi, sess->inq);
+ sess->inq = NULL;
- PMD_DRV_LOG(ERR, "No session attached to qp");
- return -EINVAL;
+ sess->qp = NULL;
+
+ return 0;
}
static int
@@ -1250,8 +1856,20 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
return -EINVAL;
}
session->ctx_pool = internals->ctx_pool;
+ session->inq = dpaa_sec_attach_rxq(internals);
+ if (session->inq == NULL) {
+ PMD_DRV_LOG(ERR, "unable to attach sec queue");
+ goto err1;
+ }
return 0;
+
+err1:
+ rte_free(session->cipher_key.data);
+ rte_free(session->auth_key.data);
+ memset(session, 0, sizeof(dpaa_sec_session));
+
+ return -EINVAL;
}
static int
@@ -1284,6 +1902,7 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev,
set_session_private_data(sess, dev->driver_id,
sess_private_data);
+
return 0;
}
@@ -1292,22 +1911,258 @@ static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
- PMD_INIT_FUNC_TRACE();
+ struct dpaa_sec_dev_private *qi = dev->data->dev_private;
uint8_t index = dev->driver_id;
void *sess_priv = get_session_private_data(sess, index);
+
+ PMD_INIT_FUNC_TRACE();
+
dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ if (s->inq)
+ dpaa_sec_detach_rxq(qi, s->inq);
rte_free(s->cipher_key.data);
rte_free(s->auth_key.data);
memset(s, 0, sizeof(dpaa_sec_session));
- struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
set_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
static int
+dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_auth_xform *auth_xform;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ dpaa_sec_session *session = (dpaa_sec_session *)sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ auth_xform = &conf->crypto_xform->next->auth;
+ } else {
+ auth_xform = &conf->crypto_xform->auth;
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ }
+ session->proto_alg = conf->protocol;
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ rte_free(session->cipher_key.data);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+ auth_xform->algo);
+ goto out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ auth_xform->algo);
+ goto out;
+ }
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ cipher_xform->algo);
+ goto out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ goto out;
+ }
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
+ sizeof(session->ip4_hdr));
+ session->ip4_hdr.ip_v = IPVERSION;
+ session->ip4_hdr.ip_hl = 5;
+ session->ip4_hdr.ip_len = rte_cpu_to_be_16(
+ sizeof(session->ip4_hdr));
+ session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+ session->ip4_hdr.ip_id = 0;
+ session->ip4_hdr.ip_off = 0;
+ session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+ session->ip4_hdr.ip_p = (ipsec_xform->proto ==
+ RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
+ : IPPROTO_AH;
+ session->ip4_hdr.ip_sum = 0;
+ session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+ session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+ session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
+ (void *)&session->ip4_hdr,
+ sizeof(struct ip));
+
+ session->encap_pdb.options =
+ (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+ PDBOPTS_ESP_OIHI_PDB_INL |
+ PDBOPTS_ESP_IVSRC |
+ PDBHMO_ESP_ENCAP_DTTL;
+ session->encap_pdb.spi = ipsec_xform->spi;
+ session->encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+ session->dir = DIR_ENC;
+ } else if (ipsec_xform->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+ session->decap_pdb.options = sizeof(struct ip) << 16;
+ session->dir = DIR_DEC;
+ } else
+ goto out;
+ session->ctx_pool = internals->ctx_pool;
+ session->inq = dpaa_sec_attach_rxq(internals);
+ if (session->inq == NULL) {
+ PMD_DRV_LOG(ERR, "unable to attach sec queue");
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ memset(session, 0, sizeof(dpaa_sec_session));
+ return -1;
+}
+
+static int
+dpaa_sec_security_session_create(void *dev,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ switch (conf->protocol) {
+ case RTE_SECURITY_PROTOCOL_IPSEC:
+ ret = dpaa_sec_set_ipsec_session(cdev, conf,
+ sess_private_data);
+ break;
+ case RTE_SECURITY_PROTOCOL_MACSEC:
+ ret = -ENOTSUP;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "DPAA2 PMD: failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sec_session_private_data(sess, sess_private_data);
+
+ return ret;
+}
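From the application side these ops are reached through the generic rte_security API: fetch the device's security context, fill in a session conf, and create the session from a mempool. A hedged sketch, assuming dev_id, sess_pool and the crypto xform chain are set up elsewhere; the SPI value and the omitted tunnel parameters are placeholders:

#include <rte_cryptodev.h>
#include <rte_mempool.h>
#include <rte_security.h>

static struct rte_security_session *
create_ipsec_session(uint8_t dev_id, struct rte_mempool *sess_pool,
		     struct rte_crypto_sym_xform *xforms)
{
	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000, /* placeholder */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			/* tunnel endpoints etc. omitted for brevity */
		},
		.crypto_xform = xforms,
	};

	if (ctx == NULL)
		return NULL;
	return rte_security_session_create(ctx, &conf, sess_pool);
}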
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+dpaa_sec_security_session_destroy(void *dev __rte_unused,
+ struct rte_security_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ void *sess_priv = get_sec_session_private_data(sess);
+
+ dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(s, 0, sizeof(dpaa_sec_session));
+ set_sec_session_private_data(sess, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+ return 0;
+}
+
+
+static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_cryptodev_config *config __rte_unused)
{
@@ -1349,7 +2204,8 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
info->capabilities = dpaa_sec_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
info->sym.max_nb_sessions_per_qp =
- RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
+ RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
+ RTE_DPAA_MAX_NB_SEC_QPS;
info->driver_id = cryptodev_driver_id;
}
}
@@ -1372,6 +2228,21 @@ static struct rte_cryptodev_ops crypto_ops = {
.qp_detach_session = dpaa_sec_qp_detach_sess,
};
+static const struct rte_security_capability *
+dpaa_sec_capabilities_get(void *device __rte_unused)
+{
+ return dpaa_sec_security_cap;
+}
+
+struct rte_security_ops dpaa_sec_security_ops = {
+ .session_create = dpaa_sec_security_session_create,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = dpaa_sec_security_session_destroy,
+ .set_pkt_metadata = NULL,
+ .capabilities_get = dpaa_sec_capabilities_get
+};
+
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
@@ -1380,6 +2251,8 @@ dpaa_sec_uninit(struct rte_cryptodev *dev)
if (dev == NULL)
return -ENODEV;
+ rte_free(dev->security_ctx);
+
rte_mempool_free(internals->ctx_pool);
rte_free(internals);
@@ -1393,8 +2266,9 @@ static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
struct dpaa_sec_dev_private *internals;
+ struct rte_security_ctx *security_instance;
struct dpaa_sec_qp *qp;
- uint32_t i;
+ uint32_t i, flags;
int ret;
char str[20];
@@ -1407,12 +2281,34 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
- RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_SECURITY |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
internals = cryptodev->data->dev_private;
- internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
+ internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
+ /*
+ * For secondary processes, we don't initialise any further, as the
+ * primary has already done this work. Only check whether a different
+ * RX function is needed.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+ return 0;
+ }
+
+ /* Initialize security_ctx only for the primary process */
+ security_instance = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (security_instance == NULL)
+ return -ENOMEM;
+ security_instance->device = (void *)cryptodev;
+ security_instance->ops = &dpaa_sec_security_ops;
+ security_instance->sess_cnt = 0;
+ cryptodev->security_ctx = security_instance;
+
for (i = 0; i < internals->max_nb_queue_pairs; i++) {
/* init qman fq for queue pair */
qp = &internals->qps[i];
@@ -1421,10 +2317,15 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
goto init_error;
}
- ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
- qman_fq_fqid(&qp->outq));
- if (ret) {
- PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
+ }
+
+ flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
+ QMAN_FQ_FLAG_TO_DCPORTAL;
+ for (i = 0; i < internals->max_nb_sessions; i++) {
+ /* create rx qman fq for sessions */
+ ret = qman_create_fq(0, flags, &internals->inq[i]);
+ if (unlikely(ret != 0)) {
+ PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
goto init_error;
}
}
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index af3f2550..f45b36cb 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -1,40 +1,14 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DPAA_SEC_H_
#define _DPAA_SEC_H_
#define NUM_POOL_CHANNELS 4
-#define DPAA_SEC_BURST 32
+#define DPAA_SEC_BURST 7
#define DPAA_SEC_ALG_UNSUPPORT (-1)
#define TDES_CBC_IV_LEN 8
#define AES_CBC_IV_LEN 16
@@ -64,36 +38,6 @@ enum dpaa_sec_op_type {
DPAA_SEC_MAX
};
-typedef struct dpaa_sec_session_entry {
- uint8_t dir; /*!< Operation Direction */
- enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
- enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
- enum rte_crypto_aead_algorithm aead_alg; /*!< Authentication Algorithm*/
- union {
- struct {
- uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
- } aead_key;
- struct {
- struct {
- uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
- } cipher_key;
- struct {
- uint8_t *data; /**< pointer to key data */
- size_t length; /**< key length in bytes */
- } auth_key;
- };
- };
- struct {
- uint16_t length;
- uint16_t offset;
- } iv; /**< Initialisation vector parameters */
- uint16_t auth_only_len; /*!< Length of data for Auth only */
- uint32_t digest_length;
- struct dpaa_sec_qp *qp;
- struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
-} dpaa_sec_session;
#define DPAA_SEC_MAX_DESC_SIZE 64
/* code or cmd block to caam */
@@ -143,11 +87,45 @@ struct sec_cdb {
uint32_t sh_desc[DPAA_SEC_MAX_DESC_SIZE];
};
+typedef struct dpaa_sec_session_entry {
+ uint8_t dir; /*!< Operation Direction */
+ enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+ enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+ enum rte_security_session_protocol proto_alg; /*!< Security Algorithm*/
+ union {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } aead_key;
+ struct {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } auth_key;
+ };
+ };
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv; /**< Initialisation vector parameters */
+ uint16_t auth_only_len; /*!< Length of data for Auth only */
+ uint32_t digest_length;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ip ip4_hdr;
+ struct ipsec_decap_pdb decap_pdb;
+ struct dpaa_sec_qp *qp;
+ struct qman_fq *inq;
+ struct sec_cdb cdb; /**< cmd block associated with the session */
+ struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
+} dpaa_sec_session;
+
struct dpaa_sec_qp {
struct dpaa_sec_dev_private *internals;
- struct sec_cdb cdb; /* cmd block associated with qp */
- dpaa_sec_session *ses; /* session associated with qp */
- struct qman_fq inq;
struct qman_fq outq;
int rx_pkts;
int rx_errs;
@@ -155,12 +133,17 @@ struct dpaa_sec_qp {
int tx_errs;
};
-#define RTE_MAX_NB_SEC_QPS RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+#define RTE_DPAA_MAX_NB_SEC_QPS 1
+#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
+
/* internal sec queue interface */
struct dpaa_sec_dev_private {
void *sec_hw;
struct rte_mempool *ctx_pool; /* per dev mempool for dpaa_sec_op_ctx */
- struct dpaa_sec_qp qps[RTE_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+ struct dpaa_sec_qp qps[RTE_DPAA_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+ struct qman_fq inq[RTE_DPAA_MAX_RX_QUEUE];
+ unsigned char inq_attach[RTE_DPAA_MAX_RX_QUEUE];
unsigned int max_nb_queue_pairs;
unsigned int max_nb_sessions;
};
@@ -181,6 +164,7 @@ struct dpaa_sec_op_ctx {
struct rte_crypto_op *op;
struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
uint32_t fd_status;
+ int64_t vtop_offset;
uint8_t digest[DPAA_MAX_NB_MAX_DIGEST];
};
@@ -309,7 +293,7 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
- {.auth = {
+ {.aead = {
.algo = RTE_CRYPTO_AEAD_AES_GCM,
.block_size = 16,
.key_size = {
@@ -399,4 +383,60 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
+static const struct rte_security_capability dpaa_sec_security_cap[] = {
+ { /* IPsec Lookaside Protocol offload ESP Transport Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa_sec_capabilities
+ },
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa_sec_capabilities
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+
+/**
+ * One's-complement checksum (RFC 1071)
+ *
+ * @param buffer buffer over which to compute the checksum
+ * @param len buffer length in bytes
+ *
+ * @return checksum value in host cpu order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+ uint16_t *buf = (uint16_t *)buffer;
+ uint32_t sum = 0;
+ uint16_t result;
+
+ for (sum = 0; len > 1; len -= 2)
+ sum += *buf++;
+
+ if (len == 1)
+ sum += *(unsigned char *)buf;
+
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ sum += (sum >> 16);
+ result = ~sum;
+
+ return result;
+}
+
#endif /* _DPAA_SEC_H_ */
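calc_chksum() above is the standard one's-complement Internet checksum (RFC 1071), used to pre-fill ip_sum in the prebuilt tunnel header. Its defining property is that re-summing a buffer whose checksum slot has been filled in yields zero, which allows a quick self-check; a sketch, assuming calc_chksum() as defined above is in scope:

#include <assert.h>
#include <stdint.h>

static void
chksum_selfcheck(void)
{
	uint16_t hdr[10] = {
		0x4500, 0x0073, 0x0000, 0x4000, 0x4011,
		0x0000, /* checksum slot, filled below */
		0xc0a8, 0x0001, 0xc0a8, 0x00c7,
	};

	hdr[5] = calc_chksum(hdr, sizeof(hdr));
	/* one's-complement property: data plus its checksum sums to ~0 */
	assert(calc_chksum(hdr, sizeof(hdr)) == 0);
}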
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_log.h b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
index dfe69757..992a79f5 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_log.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
* Copyright NXP 2017.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DPAA_SEC_LOG_H_
diff --git a/drivers/crypto/kasumi/Makefile b/drivers/crypto/kasumi/Makefile
index cf56b7a0..cafe9498 100644
--- a/drivers/crypto/kasumi/Makefile
+++ b/drivers/crypto/kasumi/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index f5db5e32..356621d4 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_common.h>
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
index 4da12f37..a388dbb6 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <string.h>
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
index 5f7044b8..a397bee6 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#ifndef _RTE_KASUMI_PMD_PRIVATE_H_
@@ -72,7 +44,7 @@ struct kasumi_private {
struct kasumi_qp {
uint16_t id;
/**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_LEN];
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
/**< Unique Queue Pair Name */
struct rte_ring *processed_ops;
/**< Ring for placing processed ops */
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
new file mode 100644
index 00000000..17041ad8
--- /dev/null
+++ b/drivers/crypto/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['qat', 'null', 'openssl']
+std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps
+config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/crypto/mrvl/Makefile b/drivers/crypto/mrvl/Makefile
index 3532f7cf..bc5c2270 100644
--- a/drivers/crypto/mrvl/Makefile
+++ b/drivers/crypto/mrvl/Makefile
@@ -47,13 +47,14 @@ LIB = librte_pmd_mrvl_crypto.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -I$(LIBMUSDK_PATH)/include
-CFLAGS += -DMVCONF_ARCH_DMA_ADDR_T_64BIT
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
# library version
LIBABIVER := 1
# versioning export map
-EXPORT_MAP := rte_mrvl_pmd_version.map
+EXPORT_MAP := rte_pmd_mrvl_version.map
# external library dependencies
LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
diff --git a/drivers/crypto/mrvl/rte_mrvl_compat.h b/drivers/crypto/mrvl/rte_mrvl_compat.h
index c29fa105..22cd1840 100644
--- a/drivers/crypto/mrvl/rte_mrvl_compat.h
+++ b/drivers/crypto/mrvl/rte_mrvl_compat.h
@@ -43,6 +43,7 @@
#ifdef container_of
#undef container_of
#endif
+#include "env/mv_autogen_comp_flags.h"
#include "drivers/mv_sam.h"
#include "drivers/mv_sam_cio.h"
#include "drivers/mv_sam_session.h"
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c b/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
index 434cf850..a1de33ae 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
+++ b/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
@@ -558,7 +558,7 @@ mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
int socket_id, struct rte_mempool *session_pool)
{
struct mrvl_crypto_qp *qp = NULL;
- char match[RTE_CRYPTODEV_NAME_LEN];
+ char match[RTE_CRYPTODEV_NAME_MAX_LEN];
unsigned int n;
/* Allocate the queue pair data structure. */
diff --git a/drivers/crypto/null/Makefile b/drivers/crypto/null/Makefile
index 49ada097..9e6400c1 100644
--- a/drivers/crypto/null/Makefile
+++ b/drivers/crypto/null/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/null/meson.build b/drivers/crypto/null/meson.build
new file mode 100644
index 00000000..502336da
--- /dev/null
+++ b/drivers/crypto/null/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+deps += 'bus_vdev'
+name = 'null_crypto'
+sources = files('null_crypto_pmd.c', 'null_crypto_pmd_ops.c')
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index f031d3b8..dc8e5776 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_common.h>
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
index c427050a..f8e5f61f 100644
--- a/drivers/crypto/null/null_crypto_pmd_ops.c
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <string.h>
diff --git a/drivers/crypto/null/null_crypto_pmd_private.h b/drivers/crypto/null/null_crypto_pmd_private.h
index 7c7852fb..0fd13362 100644
--- a/drivers/crypto/null/null_crypto_pmd_private.h
+++ b/drivers/crypto/null/null_crypto_pmd_private.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
*/
#ifndef _NULL_CRYPTO_PMD_PRIVATE_H_
@@ -67,7 +39,7 @@ struct null_crypto_private {
struct null_crypto_qp {
uint16_t id;
/**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_LEN];
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
/**< Unique Queue Pair Name */
struct rte_ring *processed_pkts;
/**< Ring for placing process packets */
diff --git a/drivers/crypto/openssl/Makefile b/drivers/crypto/openssl/Makefile
index 1a006432..8fe086b9 100644
--- a/drivers/crypto/openssl/Makefile
+++ b/drivers/crypto/openssl/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/openssl/meson.build b/drivers/crypto/openssl/meson.build
new file mode 100644
index 00000000..c2a0dd8b
--- /dev/null
+++ b/drivers/crypto/openssl/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+dep = dependency('libcrypto', required: false)
+if not dep.found()
+ build = false
+endif
+deps += 'bus_vdev'
+sources = files('rte_openssl_pmd.c', 'rte_openssl_pmd_ops.c')
+ext_deps += dep
+pkgconfig_extra_libs += '-lcrypto'
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 06e1a6de..0d9bedc0 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_common.h>
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index c5722399..1cb87d59 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <string.h>
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h
index 26bf862c..bc8dc7cd 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_private.h
+++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#ifndef _OPENSSL_PMD_PRIVATE_H_
@@ -98,7 +70,7 @@ struct openssl_private {
struct openssl_qp {
uint16_t id;
/**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_LEN];
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
/**< Unique Queue Pair Name */
struct rte_ring *processed_ops;
/**< Ring for placing process packets */
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index 745d0197..260912dc 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
new file mode 100644
index 00000000..7b904630
--- /dev/null
+++ b/drivers/crypto/qat/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+dep = dependency('libcrypto', required: false)
+if not dep.found()
+ build = false
+endif
+sources = files('qat_crypto.c', 'qat_qp.c',
+ 'qat_adf/qat_algs_build_desc.c',
+ 'rte_qat_cryptodev.c')
+includes += include_directories('qat_adf')
+deps += ['bus_pci']
+ext_deps += dep
+pkgconfig_extra_libs += '-lcrypto'
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
index d218f85d..4f8f3d13 100644
--- a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
@@ -80,6 +80,7 @@
#define ADF_RING_NEAR_WATERMARK_512 0x08
#define ADF_RING_NEAR_WATERMARK_0 0x00
#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+#define ADF_RING_EMPTY_SIG_BYTE 0x7F
/* Valid internal ring size values */
#define ADF_RING_SIZE_128 0x01
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index db6c9a32..26f854c2 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -359,6 +359,11 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
in = rte_zmalloc("working mem for key",
ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+ if (in == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
rte_memcpy(in, qat_aes_xcbc_key_seed,
ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
@@ -389,6 +394,11 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
ICP_QAT_HW_GALOIS_E_CTR0_SZ);
in = rte_zmalloc("working mem for key",
ICP_QAT_HW_GALOIS_H_SZ, 16);
+ if (in == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
&enc_key) != 0) {
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index a572967c..4afe159d 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
*/
#include <stdio.h>
@@ -69,6 +40,10 @@
#include "adf_transport_access_macros.h"
#define BYTE_LENGTH 8
+/* BPI is only used for partial blocks of DES and AES,
+ * so the AES block length can be assumed to be the maximum
+ * length for iv, src and dst.
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
static int
qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
@@ -121,16 +96,16 @@ bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
{
EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
int encrypted_ivlen;
- uint8_t encrypted_iv[16];
- int i;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
/* ECB method: encrypt the IV, then XOR this with plaintext */
if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
<= 0)
goto cipher_encrypt_err;
- for (i = 0; i < srclen; i++)
- *(dst+i) = *(src+i)^(encrypted_iv[i]);
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
return 0;
@@ -150,21 +125,21 @@ bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
{
EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
int encrypted_ivlen;
- uint8_t encrypted_iv[16];
- int i;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
<= 0)
goto cipher_decrypt_err;
- for (i = 0; i < srclen; i++)
- *(dst+i) = *(src+i)^(encrypted_iv[i]);
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
return 0;
cipher_decrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
+ PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
return -EINVAL;
}
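Both BPI helpers above reduce to the same operation: ECB-encrypt the IV to obtain one block of keystream, then XOR it over the residual bytes, which is why the encrypt primitive is used on the decrypt path as well. A freestanding sketch, with block_encrypt() as a hypothetical stand-in for the EVP_EncryptUpdate() call:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical one-block ECB encryption of the IV. */
extern void block_encrypt(const uint8_t iv[16], uint8_t keystream[16]);

static void
xor_residual(const uint8_t *src, uint8_t *dst, size_t len,
	     const uint8_t iv[16])
{
	uint8_t ks[16];
	size_t i;

	block_encrypt(iv, ks); /* one block of keystream */
	for (i = 0; i < len; i++) /* len <= 16 by construction */
		dst[i] = src[i] ^ ks[i];
}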
@@ -844,7 +819,7 @@ static inline uint32_t
qat_bpicipher_preprocess(struct qat_session *ctx,
struct rte_crypto_op *op)
{
- uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
uint8_t last_block_len = block_len > 0 ?
sym_op->cipher.data.length % block_len : 0;
@@ -899,7 +874,7 @@ static inline uint32_t
qat_bpicipher_postprocess(struct qat_session *ctx,
struct rte_crypto_op *op)
{
- uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
uint8_t last_block_len = block_len > 0 ?
sym_op->cipher.data.length % block_len : 0;
@@ -1035,10 +1010,10 @@ void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
void *cur_desc = (uint8_t *)q->base_addr + old_head;
if (new_head < old_head) {
- memset(cur_desc, ADF_RING_EMPTY_SIG, max_head - old_head);
- memset(q->base_addr, ADF_RING_EMPTY_SIG, new_head);
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
+ memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
} else {
- memset(cur_desc, ADF_RING_EMPTY_SIG, new_head - old_head);
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
}
q->nb_processed_responses = 0;
q->csr_head = new_head;
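
The ADF_RING_EMPTY_SIG_BYTE fill above is the usual wrap-aware reset for a circular buffer: when the new head is behind the old head the cleared region wraps past the end of the ring, so two memsets are needed. A hedged generic sketch of the same logic; ring_reset_range is a hypothetical helper, not QAT PMD code:

    #include <stdint.h>
    #include <string.h>

    static void
    ring_reset_range(uint8_t *base, uint32_t old_head, uint32_t new_head,
                     uint32_t ring_size, uint8_t empty_sig)
    {
            if (new_head < old_head) {
                    /* region wraps: clear to the end, then from the start */
                    memset(base + old_head, empty_sig, ring_size - old_head);
                    memset(base, empty_sig, new_head);
            } else {
                    memset(base + old_head, empty_sig, new_head - old_head);
            }
    }
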
@@ -1367,7 +1342,9 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
}
min_ofs = auth_ofs;
- auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
+ if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+ auth_param->auth_res_addr =
+ op->sym->auth.digest.phys_addr;
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index c64d7756..c182af2e 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2016 Intel Corporation
*/
#ifndef _QAT_CRYPTO_H_
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index 89ba27d4..37a6b7cb 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#ifndef _QAT_CRYPTO_CAPABILITIES_H_
diff --git a/drivers/crypto/qat/qat_logs.h b/drivers/crypto/qat/qat_logs.h
index a909f630..565089a4 100644
--- a/drivers/crypto/qat/qat_logs.h
+++ b/drivers/crypto/qat/qat_logs.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef _QAT_LOGS_H_
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index ced3aa6a..87b9ce0b 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <rte_common.h>
@@ -180,6 +151,11 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
RTE_CACHE_LINE_SIZE);
+ if (qp->op_cookies == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
+ rte_free(qp);
+ return -ENOMEM;
+ }
qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
qp->inflights16 = 0;
@@ -221,7 +197,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
for (i = 0; i < qp->nb_descriptors; i++) {
if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
- return -EFAULT;
+ goto create_err;
}
struct qat_crypto_op_cookie *sql_cookie =
@@ -246,6 +222,9 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
return 0;
create_err:
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+ rte_free(qp->op_cookies);
rte_free(qp);
return -EFAULT;
}
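
The new create_err path releases resources in reverse order of acquisition and only frees the cookie pool when it was actually created. The same goto-unwind shape, reduced to plain C with hypothetical names (setup_example is not driver code):

    #include <stdlib.h>

    static int
    setup_example(void)
    {
            void *cookies = NULL, *pool = NULL;

            cookies = malloc(64);
            if (cookies == NULL)
                    goto create_err;
            pool = malloc(256);
            if (pool == NULL)
                    goto create_err;
            /* ... further setup that may also jump to create_err ... */
            return 0;
    create_err:
            /* release in reverse order; free(NULL) is a no-op, so a
             * partially built state can share the same exit path */
            free(pool);
            free(cookies);
            return -1;
    }
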
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 4f8e4bfe..bf837401 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation
*/
#include <rte_bus_pci.h>
diff --git a/drivers/crypto/scheduler/Makefile b/drivers/crypto/scheduler/Makefile
index 123b0f6d..a9514e33 100644
--- a/drivers/crypto/scheduler/Makefile
+++ b/drivers/crypto/scheduler/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index df8634ad..140c8b41 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <rte_reorder.h>
#include <rte_cryptodev.h>
@@ -467,8 +439,8 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
RTE_CRYPTODEV_NAME_MAX_LEN);
return -EINVAL;
}
- strncpy(sched_ctx->name, scheduler->name,
- RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+ snprintf(sched_ctx->name, sizeof(sched_ctx->name), "%s",
+ scheduler->name);
if (strlen(scheduler->description) >
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
@@ -477,8 +449,8 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
return -EINVAL;
}
- strncpy(sched_ctx->description, scheduler->description,
- RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
+ snprintf(sched_ctx->description, sizeof(sched_ctx->description), "%s",
+ scheduler->description);
/* load scheduler instance operations functions */
sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
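
The strncpy-to-snprintf change matters because strncpy does not NUL-terminate the destination when the source is at least as long as the buffer, while snprintf always terminates (truncating if needed). A small standalone demonstration of the difference:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char a[4], b[4];

            strncpy(a, "abcdef", sizeof(a));        /* a holds "abcd", no NUL */
            snprintf(b, sizeof(b), "%s", "abcdef"); /* b holds "abc\0" */
            printf("%.4s %s\n", a, b);              /* precision bounds the read of a */
            return 0;
    }
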
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index df22f2a9..01e7646c 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#ifndef _RTE_CRYPTO_SCHEDULER_H
@@ -227,7 +198,7 @@ int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);
/**
- * Get the the attached slaves' count and/or ID
+ * Get the attached slaves' count and/or ID
*
* @param scheduler_id
* The target scheduler device ID
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
index 719165c3..c4369589 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#ifndef _RTE_CRYPTO_SCHEDULER_OPERATIONS_H
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 2aa13f8e..005b1638 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <rte_cryptodev.h>
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index 0cd5bce5..b2ce44ce 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <unistd.h>
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 1dd1bc32..96bf0161 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <rte_cryptodev.h>
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index acdf6361..51a85fa6 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <rte_common.h>
#include <rte_hexdump.h>
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index d9b52352..680c2afb 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <string.h>
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index e606716a..dd7ca5a4 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#ifndef _SCHEDULER_PMD_PRIVATE_H
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 4a847281..c6e03e21 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <rte_cryptodev.h>
diff --git a/drivers/crypto/snow3g/Makefile b/drivers/crypto/snow3g/Makefile
index 183c6ce9..ee5027d0 100644
--- a/drivers/crypto/snow3g/Makefile
+++ b/drivers/crypto/snow3g/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 4cc9a94f..27af69f2 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_common.h>
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
index 157a2de3..f60b4759 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <string.h>
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
index 7b9729f0..eea900e0 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#ifndef _RTE_SNOW3G_PMD_PRIVATE_H_
@@ -72,7 +44,7 @@ struct snow3g_private {
struct snow3g_qp {
uint16_t id;
/**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_LEN];
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
/**< Unique Queue Pair Name */
struct rte_ring *processed_ops;
/**< Ring for placing processed ops */
diff --git a/drivers/crypto/zuc/Makefile b/drivers/crypto/zuc/Makefile
index af77bc8a..68d84eeb 100644
--- a/drivers/crypto/zuc/Makefile
+++ b/drivers/crypto/zuc/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index 590224bf..ad65b80c 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_common.h>
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
index 243c0991..8abac898 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <string.h>
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_private.h b/drivers/crypto/zuc/rte_zuc_pmd_private.h
index a57b8cd0..b83c4a04 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_private.h
+++ b/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#ifndef _RTE_ZUC_PMD_PRIVATE_H_
@@ -73,7 +45,7 @@ struct zuc_private {
struct zuc_qp {
uint16_t id;
/**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_LEN];
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
/**< Unique Queue Pair Name */
struct rte_ring *processed_ops;
/**< Ring for placing processed ops */
diff --git a/drivers/event/Makefile b/drivers/event/Makefile
index 1f9c0ba2..c3d89a15 100644
--- a/drivers/event/Makefile
+++ b/drivers/event/Makefile
@@ -1,39 +1,14 @@
-# BSD LICENSE
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Cavium, Inc
#
-# Copyright(c) 2016 Cavium, Inc. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Cavium, Inc nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += dpaa
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/dpaa/Makefile b/drivers/event/dpaa/Makefile
new file mode 100644
index 00000000..ddd85522
--- /dev/null
+++ b/drivers/event/dpaa/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+RTE_SDK_DPAA=$(RTE_SDK)/drivers/net/dpaa
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa_event.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS := -I$(SRCDIR) $(CFLAGS)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
+CFLAGS += -I$(RTE_SDK_DPAA)/
+CFLAGS += -I$(RTE_SDK_DPAA)/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include
+
+EXPORT_MAP := rte_pmd_dpaa_event_version.map
+
+LIBABIVER := 1
+
+# Interfaces with DPDK
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += dpaa_eventdev.c
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_eventdev -lrte_pmd_dpaa -lrte_bus_vdev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
new file mode 100644
index 00000000..00068015
--- /dev/null
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -0,0 +1,655 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/epoll.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_dpaa_bus.h>
+#include <rte_dpaa_logs.h>
+#include <rte_cycles_64.h>
+
+#include <dpaa_ethdev.h>
+#include "dpaa_eventdev.h"
+#include <dpaa_mempool.h>
+
+/*
+ * Clarifications
+ * Eventdev = Virtual Instance for SoC
+ * Eventport = Portal Instance
+ * Eventqueue = Channel Instance
+ * One eventdev can have N event queues
+ */
+
+static int
+dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ uint64_t cycles_per_second;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ cycles_per_second = rte_get_timer_hz();
+ *timeout_ticks = ns * (cycles_per_second / NS_PER_S);
+
+ return 0;
+}
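
Note that the tick conversion uses integer division, so any sub-GHz fraction of the timer frequency is dropped before the multiply. A worked standalone example under an assumed 1.2 GHz timer (1200000000 / 1000000000 == 1, so 10000 ns maps to 10000 ticks); the hz value here is an assumption, not a DPAA property:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t hz = 1200000000ULL;            /* assumed timer frequency */
            uint64_t ns = 10000, ns_per_s = 1000000000ULL;
            uint64_t ticks = ns * (hz / ns_per_s);  /* integer division drops 0.2 GHz */

            printf("%" PRIu64 " ns -> %" PRIu64 " ticks\n", ns, ticks);
            return 0;
    }
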
+
+static void
+dpaa_eventq_portal_add(u16 ch_id)
+{
+ uint32_t sdqcr;
+
+ sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
+ qman_static_dequeue_add(sdqcr, NULL);
+}
+
+static uint16_t
+dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ uint16_t i;
+ struct rte_mbuf *mbuf;
+
+ RTE_SET_USED(port);
+ /* Release all the contexts saved previously */
+ for (i = 0; i < nb_events; i++) {
+ switch (ev[i].op) {
+ case RTE_EVENT_OP_RELEASE:
+ qman_dca_index(ev[i].impl_opaque, 0);
+ mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
+ mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+ DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
+ DPAA_PER_LCORE_DQRR_SIZE--;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return nb_events;
+}
+
+static uint16_t
+dpaa_event_enqueue(void *port, const struct rte_event *ev)
+{
+ return dpaa_event_enqueue_burst(port, ev, 1);
+}
+
+static uint16_t
+dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ int ret;
+ u16 ch_id;
+ void *buffers[8];
+ u32 num_frames, i;
+ uint64_t wait_time, cur_ticks, start_ticks;
+ struct dpaa_port *portal = (struct dpaa_port *)port;
+ struct rte_mbuf *mbuf;
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ /* Affine current thread context to a qman portal */
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_EVENTDEV_ERR("Unable to initialize portal");
+			return 0;
+ }
+ }
+
+ if (unlikely(!portal->is_port_linked)) {
+ /*
+ * Affine event queue for current thread context
+ * to a qman portal.
+ */
+ for (i = 0; i < portal->num_linked_evq; i++) {
+ ch_id = portal->evq_info[i].ch_id;
+ dpaa_eventq_portal_add(ch_id);
+ }
+ portal->is_port_linked = true;
+ }
+
+ /* Check if there are atomic contexts to be released */
+ i = 0;
+ while (DPAA_PER_LCORE_DQRR_SIZE) {
+ if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
+ qman_dca_index(i, 0);
+ mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
+ mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+ DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
+ DPAA_PER_LCORE_DQRR_SIZE--;
+ }
+ i++;
+ }
+ DPAA_PER_LCORE_DQRR_HELD = 0;
+
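+	/* Pick the wait budget: the caller's ticks in per-dequeue timeout
+	 * mode, otherwise the global timeout fixed at configure time.
+	 */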
+ if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
+ wait_time = timeout_ticks;
+ else
+ wait_time = portal->timeout;
+
+	/* Let's dequeue the frames */
+ start_ticks = rte_get_timer_cycles();
+ wait_time += start_ticks;
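+	/* Busy-poll the portal until a frame arrives or the deadline
+	 * computed above expires.
+	 */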
+ do {
+ num_frames = qman_portal_dequeue(ev, nb_events, buffers);
+ if (num_frames != 0)
+ break;
+ cur_ticks = rte_get_timer_cycles();
+ } while (cur_ticks < wait_time);
+
+ return num_frames;
+}
+
+static uint16_t
+dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+ return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
+}
+
+static void
+dpaa_event_dev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ dev_info->driver_name = "event_dpaa";
+ dev_info->min_dequeue_timeout_ns =
+ DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_dequeue_timeout_ns =
+ DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
+ dev_info->dequeue_timeout_ns =
+ DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_event_queues =
+ DPAA_EVENT_MAX_QUEUES;
+ dev_info->max_event_queue_flows =
+ DPAA_EVENT_MAX_QUEUE_FLOWS;
+ dev_info->max_event_queue_priority_levels =
+ DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
+ dev_info->max_event_priority_levels =
+ DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
+ dev_info->max_event_ports =
+ DPAA_EVENT_MAX_EVENT_PORT;
+ dev_info->max_event_port_dequeue_depth =
+ DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ dev_info->max_event_port_enqueue_depth =
+ DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+	/*
+	 * TODO: Need to find out how to fetch this info from the
+	 * kernel or elsewhere.
+	 */
+ dev_info->max_num_events =
+ DPAA_EVENT_MAX_NUM_EVENTS;
+ dev_info->event_dev_cap =
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+}
+
+static int
+dpaa_event_dev_configure(const struct rte_eventdev *dev)
+{
+ struct dpaa_eventdev *priv = dev->data->dev_private;
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+ int ret, i;
+ uint32_t *ch_id;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+ priv->nb_events_limit = conf->nb_events_limit;
+ priv->nb_event_queues = conf->nb_event_queues;
+ priv->nb_event_ports = conf->nb_event_ports;
+ priv->nb_event_queue_flows = conf->nb_event_queue_flows;
+ priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
+ priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
+ priv->event_dev_cfg = conf->event_dev_cfg;
+
+ /* Check dequeue timeout method is per dequeue or global */
+ if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+		/*
+		 * Use the timeout value given with each dequeue
+		 * operation, so invalidate this global timeout value.
+		 */
+ priv->dequeue_timeout_ns = 0;
+ }
+
+ ch_id = rte_malloc("dpaa-channels",
+ sizeof(uint32_t) * priv->nb_event_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (ch_id == NULL) {
+		EVENTDEV_DRV_ERR("Failed to allocate memory for dpaa channels\n");
+ return -ENOMEM;
+ }
+ /* Create requested event queues within the given event device */
+ ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
+ if (ret < 0) {
+ EVENTDEV_DRV_ERR("Failed to create internal channel\n");
+ rte_free(ch_id);
+ return ret;
+ }
+ for (i = 0; i < priv->nb_event_queues; i++)
+ priv->evq_info[i].ch_id = (u16)ch_id[i];
+
+	/* Let's prepare the event ports */
+ memset(&priv->ports[0], 0,
+ sizeof(struct dpaa_port) * priv->nb_event_ports);
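+	/*
+	 * Select each port's dequeue timeout: an invalid marker when the
+	 * timeout arrives with every dequeue call, the driver default when
+	 * none was given, or the user's global timeout converted to ticks.
+	 */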
+ if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ for (i = 0; i < priv->nb_event_ports; i++) {
+ priv->ports[i].timeout =
+ DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
+ }
+ } else if (priv->dequeue_timeout_ns == 0) {
+ for (i = 0; i < priv->nb_event_ports; i++) {
+ dpaa_event_dequeue_timeout_ticks(NULL,
+ DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
+ &priv->ports[i].timeout);
+ }
+ } else {
+ for (i = 0; i < priv->nb_event_ports; i++) {
+ dpaa_event_dequeue_timeout_ticks(NULL,
+ priv->dequeue_timeout_ns,
+ &priv->ports[i].timeout);
+ }
+ }
+	/*
+	 * TODO: Currently portals are affined to threads; the maximum
+	 * number of threads equals the number of lcores.
+	 */
+ rte_free(ch_id);
+ EVENTDEV_DRV_LOG("Configured eventdev devid=%d", dev->data->dev_id);
+
+ return 0;
+}
+
+static int
+dpaa_event_dev_start(struct rte_eventdev *dev)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa_event_dev_stop(struct rte_eventdev *dev)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+ RTE_SET_USED(dev);
+}
+
+static int
+dpaa_event_dev_close(struct rte_eventdev *dev)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
+ queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+}
+
+static int
+dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct dpaa_eventdev *priv = dev->data->dev_private;
+ struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ switch (queue_conf->schedule_type) {
+ case RTE_SCHED_TYPE_PARALLEL:
+ case RTE_SCHED_TYPE_ATOMIC:
+ break;
+ case RTE_SCHED_TYPE_ORDERED:
+ EVENTDEV_DRV_ERR("Schedule type is not supported.");
+ return -1;
+ }
+ evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+ evq_info->event_queue_id = queue_id;
+
+ return 0;
+}
+
+static void
+dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static void
+dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
+ port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+}
+
+static int
+dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct dpaa_eventdev *eventdev = dev->data->dev_private;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(port_conf);
+ dev->data->ports[port_id] = &eventdev->ports[port_id];
+
+ return 0;
+}
+
+static void
+dpaa_event_port_release(void *port)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(port);
+}
+
+static int
+dpaa_event_port_link(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct dpaa_eventdev *priv = dev->data->dev_private;
+ struct dpaa_port *event_port = (struct dpaa_port *)port;
+ struct dpaa_eventq *event_queue;
+ uint8_t eventq_id;
+ int i;
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(priorities);
+
+	/* First check that the input configuration is valid */
+ for (i = 0; i < nb_links; i++) {
+ eventq_id = queues[i];
+ event_queue = &priv->evq_info[eventq_id];
+ if ((event_queue->event_queue_cfg
+ & RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
+ && (event_queue->event_port)) {
+ return -EINVAL;
+ }
+ }
+
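+	/* Record each queue's channel in the port's local table; the portal
+	 * itself is programmed lazily on the first dequeue from this port.
+	 */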
+ for (i = 0; i < nb_links; i++) {
+ eventq_id = queues[i];
+ event_queue = &priv->evq_info[eventq_id];
+ event_port->evq_info[i].event_queue_id = eventq_id;
+ event_port->evq_info[i].ch_id = event_queue->ch_id;
+ event_queue->event_port = port;
+ }
+
+ event_port->num_linked_evq = event_port->num_linked_evq + i;
+
+ return (int)i;
+}
+
+static int
+dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_links)
+{
+ int i;
+ uint8_t eventq_id;
+ struct dpaa_eventq *event_queue;
+ struct dpaa_eventdev *priv = dev->data->dev_private;
+ struct dpaa_port *event_port = (struct dpaa_port *)port;
+
+ if (!event_port->num_linked_evq)
+ return nb_links;
+
+ for (i = 0; i < nb_links; i++) {
+ eventq_id = queues[i];
+ event_port->evq_info[eventq_id].event_queue_id = -1;
+ event_port->evq_info[eventq_id].ch_id = 0;
+ event_queue = &priv->evq_info[eventq_id];
+ event_queue->event_port = NULL;
+ }
+
+ event_port->num_linked_evq = event_port->num_linked_evq - i;
+
+ return (int)i;
+}
+
+static int
+dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ const char *ethdev_driver = eth_dev->device->driver->name;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ if (!strcmp(ethdev_driver, "net_dpaa"))
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+
+ return 0;
+}
+
+static int
+dpaa_event_eth_rx_adapter_queue_add(
+ const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa_eventdev *eventdev = dev->data->dev_private;
+ uint8_t ev_qid = queue_conf->ev.queue_id;
+ u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
+ struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
+ int ret, i;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
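+	/* An rx_queue_id of -1 attaches every Rx queue of the device to the
+	 * event queue's channel; on any failure, detach what was attached.
+	 */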
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
+ ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
+ queue_conf);
+ if (ret) {
+ EVENTDEV_DRV_ERR(
+ "Event Queue attach failed:%d\n", ret);
+ goto detach_configured_queues;
+ }
+ }
+ return 0;
+ }
+
+ ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
+ if (ret)
+ EVENTDEV_DRV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
+ return ret;
+
+detach_configured_queues:
+
+	for (i = (i - 1); i >= 0; i--)
+ dpaa_eth_eventq_detach(eth_dev, i);
+
+ return ret;
+}
+
+static int
+dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
+{
+ int ret, i;
+ struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
+
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
+ ret = dpaa_eth_eventq_detach(eth_dev, i);
+ if (ret)
+ EVENTDEV_DRV_ERR(
+ "Event Queue detach failed:%d\n", ret);
+ }
+
+ return 0;
+ }
+
+ ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
+ if (ret)
+ EVENTDEV_DRV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
+ return ret;
+}
+
+static int
+dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static int
+dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ EVENTDEV_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static const struct rte_eventdev_ops dpaa_eventdev_ops = {
+ .dev_infos_get = dpaa_event_dev_info_get,
+ .dev_configure = dpaa_event_dev_configure,
+ .dev_start = dpaa_event_dev_start,
+ .dev_stop = dpaa_event_dev_stop,
+ .dev_close = dpaa_event_dev_close,
+ .queue_def_conf = dpaa_event_queue_def_conf,
+ .queue_setup = dpaa_event_queue_setup,
+ .queue_release = dpaa_event_queue_release,
+ .port_def_conf = dpaa_event_port_default_conf_get,
+ .port_setup = dpaa_event_port_setup,
+ .port_release = dpaa_event_port_release,
+ .port_link = dpaa_event_port_link,
+ .port_unlink = dpaa_event_port_unlink,
+ .timeout_ticks = dpaa_event_dequeue_timeout_ticks,
+ .eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
+ .eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
+ .eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
+ .eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
+ .eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
+};
+
+static int
+dpaa_event_dev_create(const char *name)
+{
+ struct rte_eventdev *eventdev;
+ struct dpaa_eventdev *priv;
+
+ eventdev = rte_event_pmd_vdev_init(name,
+ sizeof(struct dpaa_eventdev),
+ rte_socket_id());
+ if (eventdev == NULL) {
+ EVENTDEV_DRV_ERR("Failed to create eventdev vdev %s", name);
+ goto fail;
+ }
+
+ eventdev->dev_ops = &dpaa_eventdev_ops;
+ eventdev->enqueue = dpaa_event_enqueue;
+ eventdev->enqueue_burst = dpaa_event_enqueue_burst;
+ eventdev->dequeue = dpaa_event_dequeue;
+ eventdev->dequeue_burst = dpaa_event_dequeue_burst;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ priv = eventdev->data->dev_private;
+ priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;
+
+ return 0;
+fail:
+ return -EFAULT;
+}
+
+static int
+dpaa_event_dev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ EVENTDEV_DRV_LOG("Initializing %s", name);
+
+ return dpaa_event_dev_create(name);
+}
+
+static int
+dpaa_event_dev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ EVENTDEV_DRV_LOG("Closing %s", name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
+ .probe = dpaa_event_dev_probe,
+ .remove = dpaa_event_dev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
new file mode 100644
index 00000000..918fe35c
--- /dev/null
+++ b/drivers/event/dpaa/dpaa_eventdev.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef __DPAA_EVENTDEV_H__
+#define __DPAA_EVENTDEV_H__
+
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+#include <rte_per_lcore.h>
+
+#define EVENTDEV_NAME_DPAA_PMD event_dpaa1
+
+#define EVENTDEV_DRV_LOG(fmt, args...) \
+ DPAA_EVENTDEV_INFO(fmt, ## args)
+#define EVENTDEV_DRV_FUNC_TRACE() \
+ DPAA_EVENTDEV_DEBUG("%s() Called:\n", __func__)
+#define EVENTDEV_DRV_ERR(fmt, args...) \
+ DPAA_EVENTDEV_ERR("%s(): " fmt "\n", __func__, ## args)
+
+#define DPAA_EVENT_MAX_PORTS 8
+#define DPAA_EVENT_MAX_QUEUES 16
+#define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT 1
+#define DPAA_EVENT_MAX_DEQUEUE_TIMEOUT (UINT32_MAX - 1)
+#define DPAA_EVENT_MAX_QUEUE_FLOWS 2048
+#define DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8
+#define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
+#define DPAA_EVENT_MAX_EVENT_PORT RTE_MAX_LCORE
+#define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH 8
+#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS 100UL
+#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID ((uint64_t)-1)
+#define DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH 1
+#define DPAA_EVENT_MAX_NUM_EVENTS (INT32_MAX - 1)
+
+#define DPAA_EVENT_DEV_CAP \
+	(RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED | \
+	RTE_EVENT_DEV_CAP_BURST_MODE)
+
+#define DPAA_EVENT_QUEUE_ATOMIC_FLOWS 0
+#define DPAA_EVENT_QUEUE_ORDER_SEQUENCES 2048
+
+#define RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP \
+ (RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT | \
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ | \
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID)
+
+struct dpaa_eventq {
+ /* Channel Id */
+ uint16_t ch_id;
+ /* Configuration provided by the user */
+ uint32_t event_queue_cfg;
+ uint32_t event_queue_id;
+ /* Event port */
+ void *event_port;
+};
+
+struct dpaa_port {
+ struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
+ uint8_t num_linked_evq;
+ uint8_t is_port_linked;
+ uint64_t timeout;
+};
+
+struct dpaa_eventdev {
+ struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
+ struct dpaa_port ports[DPAA_EVENT_MAX_PORTS];
+ uint32_t dequeue_timeout_ns;
+ uint32_t nb_events_limit;
+ uint8_t max_event_queues;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint8_t resvd;
+ uint32_t nb_event_queue_flows;
+ uint32_t nb_event_port_dequeue_depth;
+ uint32_t nb_event_port_enqueue_depth;
+ uint32_t event_dev_cfg;
+};
+#endif /* __DPAA_EVENTDEV_H__ */
diff --git a/drivers/event/dpaa/rte_pmd_dpaa_event_version.map b/drivers/event/dpaa/rte_pmd_dpaa_event_version.map
new file mode 100644
index 00000000..179140fb
--- /dev/null
+++ b/drivers/event/dpaa/rte_pmd_dpaa_event_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+ local: *;
+};
diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index f34eebfa..b26862cd 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright 2017 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of NXP nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
#
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index eeeb2312..c3e6fbff 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
@@ -52,7 +26,7 @@
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_bus_vdev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>
#include <fslmc_vfio.h>
@@ -61,6 +35,7 @@
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>
@@ -72,6 +47,9 @@
* Soft Event Flow is DPCI Instance
*/
+/* Dynamic logging identifier for the eventdev driver */
+int dpaa2_logtype_event;
+
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
@@ -94,7 +72,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- PMD_DRV_LOG(ERR, "Failure in affining portal\n");
+ DPAA2_EVENTDEV_ERR("Failure in affining portal\n");
return 0;
}
}
@@ -121,13 +99,13 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
- if (event->impl_opaque) {
- uint8_t dqrr_index = event->impl_opaque - 1;
+ if (event->mbuf->seqn) {
+ uint8_t dqrr_index = event->mbuf->seqn - 1;
qbman_eq_desc_set_dca(&eqdesc[loop], 1,
dqrr_index, 0);
- DPAA2_PER_LCORE_DPIO->dqrr_size--;
- DPAA2_PER_LCORE_DPIO->dqrr_held &=
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_HELD &=
~(1 << dqrr_index);
}
@@ -144,7 +122,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
if (!loop)
return num_tx;
frames_to_send = loop;
- PMD_DRV_LOG(ERR, "Unable to allocate memory");
+ DPAA2_EVENTDEV_ERR("Unable to allocate memory");
goto send_partial;
}
rte_memcpy(ev_temp, event, sizeof(struct rte_event));
@@ -189,9 +167,9 @@ RETRY:
* case to avoid the problem.
*/
if (errno == EINTR) {
- PMD_DRV_LOG(DEBUG, "epoll_wait fails\n");
+ DPAA2_EVENTDEV_DEBUG("epoll_wait fails\n");
if (i++ > 10)
- PMD_DRV_LOG(DEBUG, "Dequeue burst Failed\n");
+ DPAA2_EVENTDEV_DEBUG("Dequeue burst Failed\n");
goto RETRY;
}
}
@@ -229,9 +207,9 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
rte_free(ev_temp);
- ev->impl_opaque = dqrr_index + 1;
- DPAA2_PER_LCORE_DPIO->dqrr_size++;
- DPAA2_PER_LCORE_DPIO->dqrr_held |= 1 << dqrr_index;
+ ev->mbuf->seqn = dqrr_index + 1;
+ DPAA2_PER_LCORE_DQRR_SIZE++;
+ DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
}
static uint16_t
@@ -249,23 +227,23 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- PMD_DRV_LOG(ERR, "Failure in affining portal\n");
+ DPAA2_EVENTDEV_ERR("Failure in affining portal\n");
return 0;
}
}
-
swp = DPAA2_PER_LCORE_PORTAL;
/* Check if there are atomic contexts to be released */
- while (DPAA2_PER_LCORE_DPIO->dqrr_size) {
- if (DPAA2_PER_LCORE_DPIO->dqrr_held & (1 << i)) {
- dq = qbman_get_dqrr_from_idx(swp, i);
- qbman_swp_dqrr_consume(swp, dq);
- DPAA2_PER_LCORE_DPIO->dqrr_size--;
+ while (DPAA2_PER_LCORE_DQRR_SIZE) {
+ if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
+ qbman_swp_dqrr_idx_consume(swp, i);
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
+ DPAA2_INVALID_MBUF_SEQN;
}
i++;
}
- DPAA2_PER_LCORE_DPIO->dqrr_held = 0;
+ DPAA2_PER_LCORE_DQRR_HELD = 0;
do {
dq = qbman_swp_dqrr_next(swp);
@@ -277,15 +255,15 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
}
return num_pkts;
}
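+		/* Prefetch the next DQRR entry while the current one is
+		 * processed, hiding portal access latency.
+		 */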
+ qbman_swp_prefetch_dqrr_next(swp);
fd = qbman_result_DQ_fd(dq);
-
rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
if (rxq) {
rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
} else {
qbman_swp_dqrr_consume(swp, dq);
- PMD_DRV_LOG(ERR, "Null Return VQ received\n");
+ DPAA2_EVENTDEV_ERR("Null Return VQ received\n");
return 0;
}
@@ -308,7 +286,7 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
{
struct dpaa2_eventdev *priv = dev->data->dev_private;
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
@@ -326,14 +304,18 @@ dpaa2_eventdev_info_get(struct rte_eventdev *dev,
DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
dev_info->max_event_priority_levels =
DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
- dev_info->max_event_ports = RTE_MAX_LCORE;
+ dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
dev_info->max_event_port_dequeue_depth =
DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
dev_info->max_event_port_enqueue_depth =
DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
- RTE_EVENT_DEV_CAP_BURST_MODE;
+		RTE_EVENT_DEV_CAP_BURST_MODE |
+ RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+
}
static int
@@ -342,7 +324,7 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
struct dpaa2_eventdev *priv = dev->data->dev_private;
struct rte_event_dev_config *conf = &dev->data->dev_conf;
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
priv->nb_event_queues = conf->nb_event_queues;
@@ -352,14 +334,15 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
priv->event_dev_cfg = conf->event_dev_cfg;
- PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
+ DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
+ dev->data->dev_id);
return 0;
}
static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
@@ -369,7 +352,7 @@ dpaa2_eventdev_start(struct rte_eventdev *dev)
static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
}
@@ -377,7 +360,7 @@ dpaa2_eventdev_stop(struct rte_eventdev *dev)
static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
@@ -388,7 +371,7 @@ static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
struct rte_event_queue_conf *queue_conf)
{
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
RTE_SET_USED(queue_id);
@@ -403,7 +386,7 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
RTE_SET_USED(queue_id);
@@ -417,7 +400,7 @@ dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
struct evq_info_t *evq_info =
&priv->evq_info[queue_id];
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
@@ -428,7 +411,7 @@ static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
struct rte_event_port_conf *port_conf)
{
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
RTE_SET_USED(port_id);
@@ -440,12 +423,13 @@ dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
port_conf->enqueue_depth =
DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+ port_conf->disable_implicit_release = 0;
}
static void
dpaa2_eventdev_port_release(void *port)
{
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(port);
}
@@ -454,7 +438,7 @@ static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
const struct rte_event_port_conf *port_conf)
{
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(port_conf);
@@ -480,7 +464,7 @@ dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
struct evq_info_t *evq_info;
int i;
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
for (i = 0; i < nb_unlinks; i++) {
evq_info = &priv->evq_info[queues[i]];
@@ -506,7 +490,7 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
uint8_t channel_index;
int ret, i, n;
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
for (i = 0; i < nb_links; i++) {
evq_info = &priv->evq_info[queues[i]];
@@ -518,7 +502,7 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
evq_info->dpcon->dpcon_id, &channel_index);
if (ret < 0) {
- PMD_DRV_ERR("Static dequeue cfg failed with ret: %d\n",
+ DPAA2_EVENTDEV_ERR("Static dequeue cfg failed with ret: %d\n",
ret);
goto err;
}
@@ -551,7 +535,7 @@ dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
{
uint32_t scale = 1;
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
*timeout_ticks = ns * scale;
@@ -562,7 +546,7 @@ dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
RTE_SET_USED(f);
@@ -575,7 +559,7 @@ dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
{
const char *ethdev_driver = eth_dev->device->driver->name;
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
@@ -597,13 +581,13 @@ dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
int i, ret;
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
ret = dpaa2_eth_eventq_attach(eth_dev, i,
dpcon_id, queue_conf);
if (ret) {
- PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
+ DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
ret);
goto fail;
}
@@ -627,7 +611,7 @@ dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
int ret;
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
if (rx_queue_id == -1)
return dpaa2_eventdev_eth_queue_add_all(dev,
@@ -636,7 +620,7 @@ dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
dpcon_id, queue_conf);
if (ret) {
- PMD_DRV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
+ DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
return ret;
}
return 0;
@@ -648,14 +632,14 @@ dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
{
int i, ret;
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
ret = dpaa2_eth_eventq_detach(eth_dev, i);
if (ret) {
- PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
+ DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
ret);
return ret;
}
@@ -671,14 +655,14 @@ dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
{
int ret;
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
if (rx_queue_id == -1)
return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);
ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
if (ret) {
- PMD_DRV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
+ DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
return ret;
}
@@ -689,7 +673,7 @@ static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev)
{
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
RTE_SET_USED(eth_dev);
@@ -701,7 +685,7 @@ static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev)
{
- PMD_DRV_FUNC_TRACE();
+ EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
RTE_SET_USED(eth_dev);
@@ -758,7 +742,7 @@ dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
dpci_dev->token, i,
&rx_queue_cfg);
if (ret) {
- PMD_DRV_LOG(ERR,
+ DPAA2_EVENTDEV_ERR(
"set_rx_q failed with err code: %d", ret);
return ret;
}
@@ -779,7 +763,7 @@ dpaa2_eventdev_create(const char *name)
sizeof(struct dpaa2_eventdev),
rte_socket_id());
if (eventdev == NULL) {
- PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
+ DPAA2_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
goto fail;
}
@@ -813,7 +797,7 @@ dpaa2_eventdev_create(const char *name)
ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
if (ret) {
- PMD_DRV_LOG(ERR,
+ DPAA2_EVENTDEV_ERR(
"dpci setup failed with err code: %d", ret);
return ret;
}
@@ -831,7 +815,7 @@ dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
const char *name;
name = rte_vdev_device_name(vdev);
- PMD_DRV_LOG(INFO, "Initializing %s", name);
+ DPAA2_EVENTDEV_INFO("Initializing %s", name);
return dpaa2_eventdev_create(name);
}
@@ -841,7 +825,7 @@ dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
const char *name;
name = rte_vdev_device_name(vdev);
- PMD_DRV_LOG(INFO, "Closing %s", name);
+ DPAA2_EVENTDEV_INFO("Closing %s", name);
return rte_event_pmd_vdev_uninit(name);
}
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index ae8e07e9..91c8f2a3 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -1,33 +1,8 @@
/*
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DPAA2_EVENTDEV_H__
@@ -41,18 +16,6 @@
#define EVENTDEV_NAME_DPAA2_PMD event_dpaa2
-#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV_DEBUG
-#define PMD_DRV_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
-#else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
-#define PMD_DRV_FUNC_TRACE() do { } while (0)
-#endif
-
-#define PMD_DRV_ERR(fmt, args...) \
- RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ## args)
-
#define DPAA2_EVENT_DEFAULT_DPCI_PRIO 0
#define DPAA2_EVENT_MAX_QUEUES 16
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
new file mode 100644
index 00000000..7d250c3f
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -0,0 +1,37 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _DPAA2_EVENTDEV_LOGS_H_
+#define _DPAA2_EVENTDEV_LOGS_H_
+
+extern int dpaa2_logtype_event;
+
+#define DPAA2_EVENTDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_LOG(DEBUG, " >>")
+
+#define DPAA2_EVENTDEV_DEBUG(fmt, args...) \
+ DPAA2_EVENTDEV_LOG(DEBUG, fmt, ## args)
+#define DPAA2_EVENTDEV_INFO(fmt, args...) \
+ DPAA2_EVENTDEV_LOG(INFO, fmt, ## args)
+#define DPAA2_EVENTDEV_ERR(fmt, args...) \
+ DPAA2_EVENTDEV_LOG(ERR, fmt, ## args)
+#define DPAA2_EVENTDEV_WARN(fmt, args...) \
+ DPAA2_EVENTDEV_LOG(WARNING, fmt, ## args)
+
+/* DP logs, compiled out if the level is below the current log level */
+#define DPAA2_EVENTDEV_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_EVENTDEV_DP_DEBUG(fmt, args...) \
+ DPAA2_EVENTDEV_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_EVENTDEV_DP_INFO(fmt, args...) \
+ DPAA2_EVENTDEV_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_EVENTDEV_DP_WARN(fmt, args...) \
+ DPAA2_EVENTDEV_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAA2_EVENTDEV_LOGS_H_ */
diff --git a/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
index 005e6234..f2377b98 100644
--- a/drivers/event/dpaa2/dpaa2_hw_dpcon.c
+++ b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
@@ -44,8 +18,9 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <fslmc_logs.h>
#include <rte_fslmc.h>
#include <mc/fsl_dpcon.h>
#include <portal/dpaa2_hw_pvt.h>
diff --git a/drivers/event/meson.build b/drivers/event/meson.build
new file mode 100644
index 00000000..d7bc4854
--- /dev/null
+++ b/drivers/event/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['skeleton', 'sw', 'octeontx']
+std_deps = ['eventdev', 'kvargs']
+config_flag_fmt = 'RTE_LIBRTE_@0@_EVENTDEV_PMD'
+driver_name_fmt = 'rte_pmd_@0@_event'
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index fdf1b738..0e49efd8 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Cavium, Inc. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Cavium, Inc nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
#
include $(RTE_SDK)/mk/rte.vars.mk
@@ -41,11 +13,11 @@ CFLAGS += $(WERROR_FLAGS)
CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/
-LDLIBS += -lrte_eal -lrte_eventdev -lrte_mempool_octeontx
-LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_mempool_octeontx -lrte_pmd_octeontx
+LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_mbuf -lrte_kvargs
LDLIBS += -lrte_bus_vdev
-EXPORT_MAP := rte_pmd_octeontx_ssovf_version.map
+EXPORT_MAP := rte_pmd_octeontx_event_version.map
LIBABIVER := 1
@@ -54,6 +26,7 @@ LIBABIVER := 1
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev_selftest.c
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays
diff --git a/drivers/event/octeontx/meson.build b/drivers/event/octeontx/meson.build
new file mode 100644
index 00000000..358fc9fc
--- /dev/null
+++ b/drivers/event/octeontx/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = files('ssovf_worker.c',
+ 'ssovf_evdev.c',
+ 'ssovf_evdev_selftest.c'
+)
+
+deps += ['mempool_octeontx', 'bus_vdev', 'pmd_octeontx']
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map b/drivers/event/octeontx/rte_pmd_octeontx_event_version.map
index 5352e7e3..5352e7e3 100644
--- a/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
+++ b/drivers/event/octeontx/rte_pmd_octeontx_event_version.map
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 117b1453..a1086077 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#include <inttypes.h>
@@ -36,8 +8,9 @@
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>
+#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
@@ -46,6 +19,17 @@
#include "ssovf_evdev.h"
+int otx_logtype_ssovf;
+
+RTE_INIT(otx_ssovf_init_log);
+static void
+otx_ssovf_init_log(void)
+{
+ otx_logtype_ssovf = rte_log_register("pmd.event.octeontx");
+ if (otx_logtype_ssovf >= 0)
+ rte_log_set_level(otx_logtype_ssovf, RTE_LOG_NOTICE);
+}
+
/* SSOPF Mailbox messages */
struct ssovf_mbox_dev_info {
@@ -187,7 +171,11 @@ ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
dev_info->max_num_events = edev->max_num_events;
dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
- RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
+			RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
+ RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+
}
static int
@@ -252,6 +240,7 @@ ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
port_conf->new_event_threshold = edev->max_num_events;
port_conf->dequeue_depth = 1;
port_conf->enqueue_depth = 1;
+ port_conf->disable_implicit_release = 0;
}
static void
@@ -592,6 +581,15 @@ ssovf_close(struct rte_eventdev *dev)
return 0;
}
+static int
+ssovf_selftest(const char *key __rte_unused, const char *value,
+ void *opaque)
+{
+ int *flag = opaque;
+ *flag = !!atoi(value);
+ return 0;
+}
+
/* Initialize and register event driver with DPDK Application */
static const struct rte_eventdev_ops ssovf_ops = {
.dev_infos_get = ssovf_info_get,
@@ -612,6 +610,8 @@ static const struct rte_eventdev_ops ssovf_ops = {
.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
+ .dev_selftest = test_eventdev_octeontx,
+
.dump = ssovf_dump,
.dev_start = ssovf_start,
.dev_stop = ssovf_stop,
@@ -627,7 +627,14 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
struct rte_eventdev *eventdev;
static int ssovf_init_once;
const char *name;
+ const char *params;
int ret;
+ int selftest = 0;
+
+ static const char *const args[] = {
+ SSOVF_SELFTEST_ARG,
+ NULL
+ };
name = rte_vdev_device_name(vdev);
/* More than one instance is not supported */
@@ -636,6 +643,28 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
return -EINVAL;
}
+ params = rte_vdev_device_args(vdev);
+ if (params != NULL && params[0] != '\0') {
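+		/* Parse optional vdev args ("selftest=1"); unsupported keys
+		 * cause the whole argument string to be ignored.
+		 */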
+ struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+ if (!kvlist) {
+ ssovf_log_info(
+ "Ignoring unsupported params supplied '%s'",
+ name);
+ } else {
+ int ret = rte_kvargs_process(kvlist,
+ SSOVF_SELFTEST_ARG,
+ ssovf_selftest, &selftest);
+ if (ret != 0) {
+ ssovf_log_err("%s: Error in selftest", name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+ }
+
+ rte_kvargs_free(kvlist);
+ }
+
eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
rte_socket_id());
if (eventdev == NULL) {
@@ -686,6 +715,8 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
edev->max_event_ports);
ssovf_init_once = 1;
+ if (selftest)
+ test_eventdev_octeontx();
return 0;
error:
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index b093a3e7..d1825b4f 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef __SSOVF_EVDEV_H__
@@ -41,22 +13,16 @@
#define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
-#ifdef RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG
-#define ssovf_log_info(fmt, args...) \
- RTE_LOG(INFO, EVENTDEV, "[%s] %s() " fmt "\n", \
- RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
-#define ssovf_log_dbg(fmt, args...) \
- RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() " fmt "\n", \
- RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
-#else
-#define ssovf_log_info(fmt, args...)
-#define ssovf_log_dbg(fmt, args...)
-#endif
+#define SSOVF_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_logtype_ssovf, \
+ "[%s] %s() " fmt "\n", \
+ RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+#define ssovf_log_info(fmt, ...) SSOVF_LOG(INFO, fmt, ##__VA_ARGS__)
+#define ssovf_log_dbg(fmt, ...) SSOVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define ssovf_log_err(fmt, ...) SSOVF_LOG(ERR, fmt, ##__VA_ARGS__)
#define ssovf_func_trace ssovf_log_dbg
-#define ssovf_log_err(fmt, args...) \
- RTE_LOG(ERR, EVENTDEV, "[%s] %s() " fmt "\n", \
- RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+#define ssovf_log_selftest ssovf_log_info
#define SSO_MAX_VHGRP (64)
#define SSO_MAX_VHWS (32)
@@ -114,6 +80,8 @@
#define SSO_GRP_GET_PRIORITY 0x7
#define SSO_GRP_SET_PRIORITY 0x8
+#define SSOVF_SELFTEST_ARG ("selftest")
+
/*
* In Cavium OcteonTX SoC, all accesses to the device registers are
 * implicitly strongly ordered. So, the relaxed version of IO operation is
@@ -180,6 +148,8 @@ ssovf_pmd_priv(const struct rte_eventdev *eventdev)
return eventdev->data->dev_private;
}
+extern int otx_logtype_ssovf;
+
uint16_t ssows_enq(void *port, const struct rte_event *ev);
uint16_t ssows_enq_burst(void *port,
const struct rte_event ev[], uint16_t nb_events);
@@ -196,5 +166,6 @@ uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
uint16_t nb_events, uint64_t timeout_ticks);
void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
void ssows_reset(struct ssows *ws);
+int test_eventdev_octeontx(void);
#endif /* __SSOVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/ssovf_evdev_selftest.c b/drivers/event/octeontx/ssovf_evdev_selftest.c
new file mode 100644
index 00000000..5e012a95
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_evdev_selftest.c
@@ -0,0 +1,1487 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+#include <rte_bus_vdev.h>
+#include <rte_test.h>
+
+#include "ssovf_evdev.h"
+
+#define NUM_PACKETS (1 << 18)
+#define MAX_EVENTS (16 * 1024)
+
+#define OCTEONTX_TEST_RUN(setup, teardown, test) \
+ octeontx_test_run(setup, teardown, test, #test)
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int evdev;
+static struct rte_mempool *eventdev_test_mempool;
+
+struct event_attr {
+ uint32_t flow_id;
+ uint8_t event_type;
+ uint8_t sub_event_type;
+ uint8_t sched_type;
+ uint8_t queue;
+ uint8_t port;
+};
+
+static uint32_t seqn_list_index;
+static int seqn_list[NUM_PACKETS];
+
+static inline void
+seqn_list_init(void)
+{
+ RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
+ memset(seqn_list, 0, sizeof(seqn_list));
+ seqn_list_index = 0;
+}
+
+static inline int
+seqn_list_update(int val)
+{
+ if (seqn_list_index >= NUM_PACKETS)
+ return -1;
+
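+	/* Record the sequence number and make the store visible to other
+	 * lcores before returning.
+	 */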
+ seqn_list[seqn_list_index++] = val;
+ rte_smp_wmb();
+ return 0;
+}
+
+static inline int
+seqn_list_check(int limit)
+{
+ int i;
+
+ for (i = 0; i < limit; i++) {
+ if (seqn_list[i] != i) {
+ ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+struct test_core_param {
+ rte_atomic32_t *total_events;
+ uint64_t dequeue_tmo_ticks;
+ uint8_t port;
+ uint8_t sched_type;
+};
+
+static int
+testsuite_setup(void)
+{
+ const char *eventdev_name = "event_octeontx";
+
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ ssovf_log_dbg("%d: Eventdev %s not found - creating.",
+ __LINE__, eventdev_name);
+ if (rte_vdev_init(eventdev_name, NULL) < 0) {
+ ssovf_log_dbg("Error creating eventdev %s",
+ eventdev_name);
+ return -1;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ ssovf_log_dbg("Error finding newly created eventdev");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+ rte_event_dev_close(evdev);
+}
+
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+ dev_conf->nb_event_ports = info->max_event_ports;
+ dev_conf->nb_event_queues = info->max_event_queues;
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
+
+enum {
+ TEST_EVENTDEV_SETUP_DEFAULT,
+ TEST_EVENTDEV_SETUP_PRIORITY,
+ TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
+};
+
+static inline int
+_eventdev_setup(int mode)
+{
+ int i, ret;
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_dev_info info;
+ const char *pool_name = "evdev_octeontx_test_pool";
+
+	/* Create and destroy a pool for each test case to make it standalone */
+ eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
+ MAX_EVENTS,
+ 0 /*MBUF_CACHE_SIZE*/,
+ 0,
+ 512, /* Use very small mbufs */
+ rte_socket_id());
+ if (!eventdev_test_mempool) {
+ ssovf_log_dbg("ERROR creating mempool");
+ return -1;
+ }
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
+ "ERROR max_num_events=%d < max_events=%d",
+ info.max_num_events, MAX_EVENTS);
+
+ devconf_set_default_sane_values(&dev_conf, &info);
+ if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
+ dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
+
+ ret = rte_event_dev_configure(evdev, &dev_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
+ if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+ if (queue_count > 8) {
+ ssovf_log_dbg(
+ "test expects the unique priority per queue");
+ return -ENOTSUP;
+ }
+
+ /* Configure event queues (0 to n) with
+ * RTE_EVENT_DEV_PRIORITY_HIGHEST to
+ * RTE_EVENT_DEV_PRIORITY_LOWEST
+ */
+ uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
+ queue_count;
+ for (i = 0; i < (int)queue_count; i++) {
+ struct rte_event_queue_conf queue_conf;
+
+ ret = rte_event_queue_default_conf_get(evdev, i,
+ &queue_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
+ i);
+ queue_conf.priority = i * step;
+ ret = rte_event_queue_setup(evdev, i, &queue_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+ i);
+ }
+
+ } else {
+ /* Configure event queues with default priority */
+ for (i = 0; i < (int)queue_count; i++) {
+ ret = rte_event_queue_setup(evdev, i, NULL);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+ i);
+ }
+ }
+ /* Configure event ports */
+ uint32_t port_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+ for (i = 0; i < (int)port_count; i++) {
+ ret = rte_event_port_setup(evdev, i, NULL);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
+ ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
+ i);
+ }
+
+ ret = rte_event_dev_start(evdev);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+ return 0;
+}
+
+static inline int
+eventdev_setup(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
+}
+
+static inline int
+eventdev_setup_priority(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
+}
+
+static inline int
+eventdev_setup_dequeue_timeout(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
+}
+
+static inline void
+eventdev_teardown(void)
+{
+ rte_event_dev_stop(evdev);
+ rte_mempool_free(eventdev_test_mempool);
+}
+
+static inline void
+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
+ uint32_t flow_id, uint8_t event_type,
+ uint8_t sub_event_type, uint8_t sched_type,
+ uint8_t queue, uint8_t port)
+{
+ struct event_attr *attr;
+
+ /* Store the event attributes in mbuf for future reference */
+ attr = rte_pktmbuf_mtod(m, struct event_attr *);
+ attr->flow_id = flow_id;
+ attr->event_type = event_type;
+ attr->sub_event_type = sub_event_type;
+ attr->sched_type = sched_type;
+ attr->queue = queue;
+ attr->port = port;
+
+ ev->flow_id = flow_id;
+ ev->sub_event_type = sub_event_type;
+ ev->event_type = event_type;
+ /* Inject the new event */
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = sched_type;
+ ev->queue_id = queue;
+ ev->mbuf = m;
+}
+
+static inline int
+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
+ uint8_t sched_type, uint8_t queue, uint8_t port,
+ unsigned int events)
+{
+ struct rte_mbuf *m;
+ unsigned int i;
+
+ for (i = 0; i < events; i++) {
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+ m->seqn = i;
+ update_event_and_validation_attr(m, &ev, flow_id, event_type,
+ sub_event_type, sched_type, queue, port);
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ return 0;
+}
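+
+/*
+ * Example (illustrative): enqueue 32 atomic events with flow_id 0 on
+ * queue 0 through port 0:
+ *
+ *	inject_events(0, RTE_EVENT_TYPE_CPU, 0, RTE_SCHED_TYPE_ATOMIC,
+ *		      0, 0, 32);
+ */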
+
+static inline int
+check_excess_events(uint8_t port)
+{
+ int i;
+ uint16_t valid_event;
+ struct rte_event ev;
+
+ /* Check for excess events; retry a few times, then exit */
+ for (i = 0; i < 32; i++) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+
+ RTE_TEST_ASSERT_SUCCESS(valid_event,
+ "Unexpected valid event=%d", ev.mbuf->seqn);
+ }
+ return 0;
+}
+
+static inline int
+generate_random_events(const unsigned int total_events)
+{
+ struct rte_event_dev_info info;
+ unsigned int i;
+ int ret;
+
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ for (i = 0; i < total_events; i++) {
+ ret = inject_events(
+ rte_rand() % info.max_event_queue_flows /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ rte_rand() % queue_count /* queue */,
+ 0 /* port */,
+ 1 /* events */);
+ if (ret)
+ return -1;
+ }
+ return ret;
+}
+
+
+static inline int
+validate_event(struct rte_event *ev)
+{
+ struct event_attr *attr;
+
+ attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+ RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
+ "flow_id mismatch enq=%d deq =%d",
+ attr->flow_id, ev->flow_id);
+ RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
+ "event_type mismatch enq=%d deq =%d",
+ attr->event_type, ev->event_type);
+ RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
+ "sub_event_type mismatch enq=%d deq =%d",
+ attr->sub_event_type, ev->sub_event_type);
+ RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
+ "sched_type mismatch enq=%d deq =%d",
+ attr->sched_type, ev->sched_type);
+ RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
+ "queue mismatch enq=%d deq =%d",
+ attr->queue, ev->queue_id);
+ return 0;
+}
+
+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
+ struct rte_event *ev);
+
+static inline int
+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
+{
+ int ret;
+ uint16_t valid_event;
+ uint32_t events = 0, forward_progress_cnt = 0, index = 0;
+ struct rte_event ev;
+
+ while (1) {
+ if (++forward_progress_cnt > UINT16_MAX) {
+ ssovf_log_dbg("Detected deadlock");
+ return -1;
+ }
+
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ forward_progress_cnt = 0;
+ ret = validate_event(&ev);
+ if (ret)
+ return -1;
+
+ if (fn != NULL) {
+ ret = fn(index, port, &ev);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to validate test specific event");
+ }
+
+ ++index;
+
+ rte_pktmbuf_free(ev.mbuf);
+ if (++events >= total_events)
+ break;
+ }
+
+ return check_excess_events(port);
+}
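+
+/*
+ * Example (illustrative): drain MAX_EVENTS events from port 0 with no
+ * test-specific validation callback:
+ *
+ *	consume_events(0, MAX_EVENTS, NULL);
+ */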
+
+static int
+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+ RTE_SET_USED(port);
+ RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
+ index, ev->mbuf->seqn);
+ return 0;
+}
+
+static inline int
+test_simple_enqdeq(uint8_t sched_type)
+{
+ int ret;
+
+ ret = inject_events(0 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type */,
+ sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ MAX_EVENTS);
+ if (ret)
+ return -1;
+
+ return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
+}
+
+static int
+test_simple_enqdeq_ordered(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_simple_enqdeq_atomic(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_simple_enqdeq_parallel(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. On dequeue, verify the enqueued event attributes using a single
+ * event port (port 0).
+ */
+static int
+test_multi_queue_enq_single_port_deq(void)
+{
+ int ret;
+
+ ret = generate_random_events(MAX_EVENTS);
+ if (ret)
+ return -1;
+
+ return consume_events(0 /* port */, MAX_EVENTS, NULL);
+}
+
+/*
+ * Inject 0..MAX_EVENTS events over 0..queue_count with modulus
+ * operation
+ *
+ * For example, Inject 32 events over 0..7 queues
+ * enqueue events 0, 8, 16, 24 in queue 0
+ * enqueue events 1, 9, 17, 25 in queue 1
+ * ..
+ * ..
+ * enqueue events 7, 15, 23, 31 in queue 7
+ *
+ * On dequeue, validate that the events come in the
+ * 0,8,16,24,1,9,17,25..,7,15,23,31 order, from queue 0 (highest priority)
+ * to queue 7 (lowest priority)
+ */
+static int
+validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint32_t range = MAX_EVENTS / queue_count;
+ uint32_t expected_val = (index % range) * queue_count;
+
+ expected_val += ev->queue_id;
+ RTE_SET_USED(port);
+ RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
+ "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+ ev->mbuf->seqn, index, expected_val, range,
+ queue_count, MAX_EVENTS);
+ return 0;
+}
+
+static int
+test_multi_queue_priority(void)
+{
+ uint8_t queue;
+ struct rte_mbuf *m;
+ int i, max_evts_roundoff;
+
+ /* See validate_queue_priority() comments for the priority validation logic */
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ max_evts_roundoff = MAX_EVENTS / queue_count;
+ max_evts_roundoff *= queue_count;
+
+ for (i = 0; i < max_evts_roundoff; i++) {
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+ m->seqn = i;
+ queue = i % queue_count;
+ update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
+ 0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
+ rte_event_enqueue_burst(evdev, 0, &ev, 1);
+ }
+
+ return consume_events(0, max_evts_roundoff, validate_queue_priority);
+}
+
+static int
+worker_multi_port_fn(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ rte_atomic32_t *total_events = param->total_events;
+ int ret;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ ret = validate_event(&ev);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ }
+ return 0;
+}
+
+static inline int
+wait_workers_to_join(int lcore, const rte_atomic32_t *count)
+{
+ uint64_t cycles, print_cycles;
+ RTE_SET_USED(count);
+
+ print_cycles = cycles = rte_get_timer_cycles();
+ while (rte_eal_get_lcore_state(lcore) != FINISHED) {
+ uint64_t new_cycles = rte_get_timer_cycles();
+
+ if (new_cycles - print_cycles > rte_get_timer_hz()) {
+ ssovf_log_dbg("\r%s: events %d", __func__,
+ rte_atomic32_read(count));
+ print_cycles = new_cycles;
+ }
+ if (new_cycles - cycles > rte_get_timer_hz() * 10) {
+ ssovf_log_dbg(
+ "%s: No schedules for seconds, deadlock (%d)",
+ __func__,
+ rte_atomic32_read(count));
+ rte_event_dev_dump(evdev, stdout);
+ cycles = new_cycles;
+ return -1;
+ }
+ }
+ rte_eal_mp_wait_lcore();
+ return 0;
+}
+
+
+static inline int
+launch_workers_and_wait(int (*master_worker)(void *),
+ int (*slave_workers)(void *), uint32_t total_events,
+ uint8_t nb_workers, uint8_t sched_type)
+{
+ uint8_t port = 0;
+ int w_lcore;
+ int ret;
+ struct test_core_param *param;
+ rte_atomic32_t atomic_total_events;
+ uint64_t dequeue_tmo_ticks;
+
+ if (!nb_workers)
+ return 0;
+
+ rte_atomic32_set(&atomic_total_events, total_events);
+ seqn_list_init();
+
+ param = malloc(sizeof(struct test_core_param) * nb_workers);
+ if (!param)
+ return -1;
+
+ ret = rte_event_dequeue_timeout_ticks(evdev,
+ rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
+ if (ret) {
+ free(param);
+ return -1;
+ }
+
+ param[0].total_events = &atomic_total_events;
+ param[0].sched_type = sched_type;
+ param[0].port = 0;
+ param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
+ rte_smp_wmb();
+
+ w_lcore = rte_get_next_lcore(
+ /* start core */ -1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+
+ for (port = 1; port < nb_workers; port++) {
+ param[port].total_events = &atomic_total_events;
+ param[port].sched_type = sched_type;
+ param[port].port = port;
+ param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+ rte_smp_wmb();
+ w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
+ rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+ }
+
+ ret = wait_workers_to_join(w_lcore, &atomic_total_events);
+ free(param);
+ return ret;
+}
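+
+/*
+ * Illustrative call: run the same handler on all worker ports until
+ * total_events are processed (first worker on port 0, the rest on
+ * ports 1..nb_workers-1):
+ *
+ *	launch_workers_and_wait(worker_multi_port_fn, worker_multi_port_fn,
+ *			MAX_EVENTS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
+ */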
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. Dequeue the events through multiple ports and verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_multi_port_deq(void)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t nr_ports;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return -1;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
+ nr_ports, rte_lcore_count() - 1);
+ return 0;
+ }
+
+ return launch_workers_and_wait(worker_multi_port_fn,
+ worker_multi_port_fn, total_events,
+ nr_ports, 0xff /* invalid */);
+}
+
+static int
+validate_queue_to_port_single_link(uint32_t index, uint8_t port,
+ struct rte_event *ev)
+{
+ RTE_SET_USED(index);
+ RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
+ "queue mismatch enq=%d deq =%d",
+ port, ev->queue_id);
+ return 0;
+}
+
+/*
+ * Link queue x to port x and check the correctness of the link by
+ * verifying queue_id == x on dequeue from the specific port x
+ */
+static int
+test_queue_to_port_single_link(void)
+{
+ int i, nr_links, ret;
+
+ uint32_t port_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count), "Port count get failed");
+
+ /* Unlink all connections that were created in eventdev_setup */
+ for (i = 0; i < (int)port_count; i++) {
+ ret = rte_event_port_unlink(evdev, i, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0,
+ "Failed to unlink all queues port=%d", i);
+ }
+
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+
+ nr_links = RTE_MIN(port_count, queue_count);
+ const unsigned int total_events = MAX_EVENTS / nr_links;
+
+ /* Link queue x to port x and inject events to queue x through port x */
+ for (i = 0; i < nr_links; i++) {
+ uint8_t queue = (uint8_t)i;
+
+ ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
+ RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
+
+ ret = inject_events(
+ 0x100 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ queue /* queue */,
+ i /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+ }
+
+ /* Verify that the events were generated from the correct queue */
+ for (i = 0; i < nr_links; i++) {
+ ret = consume_events(i /* port */, total_events,
+ validate_queue_to_port_single_link);
+ if (ret)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
+ struct rte_event *ev)
+{
+ RTE_SET_USED(index);
+ RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
+ "queue mismatch enq=%d deq =%d",
+ port, ev->queue_id);
+ return 0;
+}
+
+/*
+ * Link all even-numbered queues to port 0 and all odd-numbered queues to
+ * port 1 and verify the link connections on dequeue
+ */
+static int
+test_queue_to_port_multi_link(void)
+{
+ int ret, port0_events = 0, port1_events = 0;
+ uint8_t queue, port;
+ uint32_t nr_queues = 0;
+ uint32_t nr_ports = 0;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &nr_queues), "Queue count get failed");
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+
+ if (nr_ports < 2) {
+ ssovf_log_dbg("%s: Not enough ports to test ports=%d",
+ __func__, nr_ports);
+ return 0;
+ }
+
+ /* Unlink all connections that were created in eventdev_setup */
+ for (port = 0; port < nr_ports; port++) {
+ ret = rte_event_port_unlink(evdev, port, NULL, 0);
+ RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
+ port);
+ }
+
+ const unsigned int total_events = MAX_EVENTS / nr_queues;
+
+ /* Link all even-numbered queues to port 0 and odd-numbered to port 1 */
+ for (queue = 0; queue < nr_queues; queue++) {
+ port = queue & 0x1;
+ ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
+ RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
+ queue, port);
+
+ ret = inject_events(
+ 0x100 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ queue /* queue */,
+ port /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+
+ if (port == 0)
+ port0_events += total_events;
+ else
+ port1_events += total_events;
+ }
+
+ ret = consume_events(0 /* port */, port0_events,
+ validate_queue_to_port_multi_link);
+ if (ret)
+ return -1;
+ ret = consume_events(1 /* port */, port1_events,
+ validate_queue_to_port_multi_link);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+static int
+worker_flow_based_pipeline(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint8_t new_sched_type = param->sched_type;
+ rte_atomic32_t *total_events = param->total_events;
+ uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+ dequeue_tmo_ticks);
+ if (!valid_event)
+ continue;
+
+ /* Events from stage 0 */
+ if (ev.sub_event_type == 0) {
+ /* Move to atomic flow to maintain the ordering */
+ ev.flow_id = 0x2;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 1; /* stage 1 */
+ ev.sched_type = new_sched_type;
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ } else if (ev.sub_event_type == 1) { /* Events from stage 1 */
+ if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ssovf_log_dbg("Failed to update seqn_list");
+ return -1;
+ }
+ } else {
+ ssovf_log_dbg("Invalid ev.sub_event_type = %d",
+ ev.sub_event_type);
+ return -1;
+ }
+ }
+ return 0;
+}
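+
+/*
+ * The worker above implements a two-stage pipeline keyed on sub_event_type:
+ * stage 0 (sub_event_type == 0) forwards to stage 1 with the requested
+ * sched type; stage 1 records the sequence number and releases the event.
+ */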
+
+static int
+test_multiport_flow_sched_type_test(uint8_t in_sched_type,
+ uint8_t out_sched_type)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t nr_ports;
+ int ret;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
+ nr_ports, rte_lcore_count() - 1);
+ return 0;
+ }
+
+ /* Inject total_events events with m->seqn set to 0..total_events-1 */
+ ret = inject_events(
+ 0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ in_sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+
+ ret = launch_workers_and_wait(worker_flow_based_pipeline,
+ worker_flow_based_pipeline,
+ total_events, nr_ports, out_sched_type);
+ if (ret)
+ return -1;
+
+ if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+ out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ /* Check whether event order was maintained */
+ return seqn_list_check(total_events);
+ }
+ return 0;
+}
+
+
+/* Multi port ordered to atomic transaction */
+static int
+test_multi_port_flow_ordered_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_ordered_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_ordered_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_atomic_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_atomic_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_atomic_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_parallel_to_atomic(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_parallel_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_parallel_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+worker_group_based_pipeline(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint8_t new_sched_type = param->sched_type;
+ rte_atomic32_t *total_events = param->total_events;
+ uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+ dequeue_tmo_ticks);
+ if (!valid_event)
+ continue;
+
+ /* Events from stage 0 (group 0) */
+ if (ev.queue_id == 0) {
+ /* Move to atomic flow to maintain the ordering */
+ ev.flow_id = 0x2;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sched_type = new_sched_type;
+ ev.queue_id = 1; /* Stage 1*/
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ } else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
+ if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ssovf_log_dbg("Failed to update seqn_list");
+ return -1;
+ }
+ } else {
+ ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+test_multiport_queue_sched_type_test(uint8_t in_sched_type,
+ uint8_t out_sched_type)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint32_t nr_ports;
+ int ret;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ if (queue_count < 2 || !nr_ports) {
+ ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
+ __func__, queue_count, nr_ports,
+ rte_lcore_count() - 1);
+ return 0;
+ }
+
+ /* Inject total_events events with m->seqn set to 0..total_events-1 */
+ ret = inject_events(
+ 0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ in_sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ total_events /* events */);
+ if (ret)
+ return -1;
+
+ ret = launch_workers_and_wait(worker_group_based_pipeline,
+ worker_group_based_pipeline,
+ total_events, nr_ports, out_sched_type);
+ if (ret)
+ return -1;
+
+ if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+ out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ /* Check whether event order was maintained */
+ return seqn_list_check(total_events);
+ }
+ return 0;
+}
+
+static int
+test_multi_port_queue_ordered_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_ordered_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_ordered_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_atomic_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_atomic_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_atomic_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_parallel_to_atomic(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_parallel_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_parallel_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.sub_event_type == 255) { /* last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type++;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
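+
+/*
+ * The worker above drives a flow-based pipeline through 256 stages
+ * (sub_event_type 0..255), forwarding with a random sched type at each
+ * hop and freeing the event at the last stage.
+ */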
+
+static int
+launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
+{
+ uint32_t nr_ports;
+ int ret;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
+ nr_ports, rte_lcore_count() - 1);
+ return 0;
+ }
+
+ /* Inject MAX_EVENTS events with m->seqn set to 0..MAX_EVENTS-1 */
+ ret = inject_events(
+ 0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
+ 0 /* queue */,
+ 0 /* port */,
+ MAX_EVENTS /* events */);
+ if (ret)
+ return -1;
+
+ return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
+ 0xff /* invalid */);
+}
+
+/* Flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_flow_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_flow_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint8_t nr_queues = queue_count;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.queue_id == nr_queues - 1) { /* last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.queue_id++;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
+
+/* Queue based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_queue_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_queue_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint32_t queue_count;
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count), "Queue count get failed");
+ uint8_t nr_queues = queue_count;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.queue_id == nr_queues - 1) { /* Last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.queue_id++;
+ ev.sub_event_type = rte_rand() % 256;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
+
+/* Queue and flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_mixed_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_mixed_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_ordered_flow_producer(void *arg)
+{
+ struct test_core_param *param = arg;
+ uint8_t port = param->port;
+ struct rte_mbuf *m;
+ int counter = 0;
+
+ while (counter < NUM_PACKETS) {
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ if (m == NULL)
+ continue;
+
+ m->seqn = counter++;
+
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ ev.flow_id = 0x1; /* Generate a fat flow */
+ ev.sub_event_type = 0;
+ /* Inject the new event */
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+ ev.queue_id = 0;
+ ev.mbuf = m;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+
+ return 0;
+}
+
+static inline int
+test_producer_consumer_ingress_order_test(int (*fn)(void *))
+{
+ uint32_t nr_ports;
+
+ RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+ RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &nr_ports), "Port count get failed");
+ nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+ if (rte_lcore_count() < 3 || nr_ports < 2) {
+ ssovf_log_dbg("### Not enough cores for %s test.", __func__);
+ return 0;
+ }
+
+ launch_workers_and_wait(worker_ordered_flow_producer, fn,
+ NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
+ /* Check whether event order was maintained */
+ return seqn_list_check(NUM_PACKETS);
+}
+
+/* Flow based producer consumer ingress order test */
+static int
+test_flow_producer_consumer_ingress_order_test(void)
+{
+ return test_producer_consumer_ingress_order_test(
+ worker_flow_based_pipeline);
+}
+
+/* Queue based producer consumer ingress order test */
+static int
+test_queue_producer_consumer_ingress_order_test(void)
+{
+ return test_producer_consumer_ingress_order_test(
+ worker_group_based_pipeline);
+}
+
+static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
+ int (*test)(void), const char *name)
+{
+ if (setup() < 0) {
+ ssovf_log_selftest("Error setting up test %s", name);
+ unsupported++;
+ } else {
+ if (test() < 0) {
+ failed++;
+ ssovf_log_selftest("%s Failed", name);
+ } else {
+ passed++;
+ ssovf_log_selftest("%s Passed", name);
+ }
+ }
+
+ total++;
+ tdown();
+}
+
+int
+test_eventdev_octeontx(void)
+{
+ testsuite_setup();
+
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_queue_enq_single_port_deq);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_queue_enq_multi_port_deq);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_queue_to_port_single_link);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_queue_to_port_multi_link);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_ordered);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_parallel);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_max_stages_random_sched_type);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_max_stages_random_sched_type);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_multi_port_mixed_max_stages_random_sched_type);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_flow_producer_consumer_ingress_order_test);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_queue_producer_consumer_ingress_order_test);
+ OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
+ test_multi_queue_priority);
+ OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
+ test_multi_port_flow_ordered_to_atomic);
+ OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
+ test_multi_port_queue_ordered_to_atomic);
+
+ ssovf_log_selftest("Total tests : %d", total);
+ ssovf_log_selftest("Passed : %d", passed);
+ ssovf_log_selftest("Failed : %d", failed);
+ ssovf_log_selftest("Not supported : %d", unsupported);
+
+ testsuite_teardown();
+
+ if (failed)
+ return -1;
+
+ return 0;
+}
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 5e17c7b8..753c1e9f 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#include "ssovf_worker.h"
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index bf76ac88..d55018a9 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -1,36 +1,7 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
-
#include <rte_common.h>
#include <rte_branch_prediction.h>
@@ -53,7 +24,7 @@ enum {
/* SSO Operations */
static __rte_always_inline struct rte_mbuf *
-ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_id)
+ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
{
struct rte_mbuf *mbuf;
octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
@@ -69,7 +40,7 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_id)
mbuf->data_len = mbuf->pkt_len;
mbuf->nb_segs = 1;
mbuf->ol_flags = 0;
- mbuf->port = port_id;
+ mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
rte_mbuf_refcnt_set(mbuf, 1);
return mbuf;
}
@@ -89,7 +60,7 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
ev->event = sched_type_queue | (get_work0 & 0xffffffff);
if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
- (ev->event >> 20) & 0xF);
+ (ev->event >> 20) & 0x7F);
} else {
ev->u64 = get_work1;
}
diff --git a/drivers/event/opdl/Makefile b/drivers/event/opdl/Makefile
new file mode 100644
index 00000000..cea8118d
--- /dev/null
+++ b/drivers/event/opdl/Makefile
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_opdl_event.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+# for older GCC versions, allow us to initialize an event using
+# designated initializers.
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -le 50 && echo 1), 1)
+CFLAGS += -Wno-missing-field-initializers
+endif
+endif
+
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_kvargs
+LDLIBS += -lrte_bus_vdev -lrte_mbuf -lrte_mempool
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_evdev_opdl_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_evdev_init.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_evdev_xstats.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl_test.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
new file mode 100644
index 00000000..77083691
--- /dev/null
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -0,0 +1,769 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_lcore.h>
+#include <rte_memzone.h>
+#include <rte_kvargs.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+
+#include "opdl_evdev.h"
+#include "opdl_ring.h"
+#include "opdl_log.h"
+
+#define EVENTDEV_NAME_OPDL_PMD event_opdl
+#define NUMA_NODE_ARG "numa_node"
+#define DO_VALIDATION_ARG "do_validation"
+#define DO_TEST_ARG "self_test"
+
+
+static void
+opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
+
+uint16_t
+opdl_event_enqueue_burst(void *port,
+ const struct rte_event ev[],
+ uint16_t num)
+{
+ struct opdl_port *p = port;
+
+ if (unlikely(!p->opdl->data->dev_started))
+ return 0;
+
+ /* Either rx_enqueue or disclaim */
+ return p->enq(p, ev, num);
+}
+
+uint16_t
+opdl_event_enqueue(void *port, const struct rte_event *ev)
+{
+ struct opdl_port *p = port;
+
+ if (unlikely(!p->opdl->data->dev_started))
+ return 0;
+
+ return p->enq(p, ev, 1);
+}
+
+uint16_t
+opdl_event_dequeue_burst(void *port,
+ struct rte_event *ev,
+ uint16_t num,
+ uint64_t wait)
+{
+ struct opdl_port *p = (void *)port;
+
+ RTE_SET_USED(wait);
+
+ if (unlikely(!p->opdl->data->dev_started))
+ return 0;
+
+ /* This function pointer can point to tx_dequeue or claim */
+ return p->deq(p, ev, num);
+}
+
+uint16_t
+opdl_event_dequeue(void *port,
+ struct rte_event *ev,
+ uint64_t wait)
+{
+ struct opdl_port *p = (void *)port;
+
+ if (unlikely(!p->opdl->data->dev_started))
+ return 0;
+
+ RTE_SET_USED(wait);
+
+ return p->deq(p, ev, 1);
+}
+
+static int
+opdl_port_link(struct rte_eventdev *dev,
+ void *port,
+ const uint8_t queues[],
+ const uint8_t priorities[],
+ uint16_t num)
+{
+ struct opdl_port *p = port;
+
+ RTE_SET_USED(priorities);
+ RTE_SET_USED(dev);
+
+ if (unlikely(dev->data->dev_started)) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Attempt to link queue (%u) to port %d while device started\n",
+ dev->data->dev_id,
+ queues[0],
+ p->id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ /* Max of 1 queue per port */
+ if (num > 1) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Attempt to link more than one queue (%u) to port %d requested\n",
+ dev->data->dev_id,
+ num,
+ p->id);
+ rte_errno = -EDQUOT;
+ return 0;
+ }
+
+ if (!p->configured) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "port %d not configured, cannot link to %u\n",
+ dev->data->dev_id,
+ p->id,
+ queues[0]);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (p->external_qid != OPDL_INVALID_QID) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "port %d already linked to queue %u, cannot link to %u\n",
+ dev->data->dev_id,
+ p->id,
+ p->external_qid,
+ queues[0]);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ p->external_qid = queues[0];
+
+ return 1;
+}
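+
+/*
+ * Example (illustrative, assumes the device is stopped and the port is
+ * configured): OPDL allows at most one queue per port, so a link request
+ * always carries a single queue id:
+ *
+ *	uint8_t q = 2;
+ *
+ *	if (rte_event_port_link(dev_id, port_id, &q, NULL, 1) != 1)
+ *		... inspect rte_errno ...
+ */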
+
+static int
+opdl_port_unlink(struct rte_eventdev *dev,
+ void *port,
+ uint8_t queues[],
+ uint16_t nb_unlinks)
+{
+ struct opdl_port *p = port;
+
+ RTE_SET_USED(queues);
+ RTE_SET_USED(nb_unlinks);
+
+ if (unlikely(dev->data->dev_started)) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Attempt to unlink queue (%u) to port %d while device started\n",
+ dev->data->dev_id,
+ queues[0],
+ p->id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ /* Port Stuff */
+ p->queue_id = OPDL_INVALID_QID;
+ p->p_type = OPDL_INVALID_PORT;
+ p->external_qid = OPDL_INVALID_QID;
+
+ /* Always report 0 queues unlinked: the pipeline is static */
+ return 0;
+}
+
+static int
+opdl_port_setup(struct rte_eventdev *dev,
+ uint8_t port_id,
+ const struct rte_event_port_conf *conf)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ struct opdl_port *p = &device->ports[port_id];
+
+ RTE_SET_USED(conf);
+
+ /* Check if port already configured */
+ if (p->configured) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Attempt to setup port %d which is already setup\n",
+ dev->data->dev_id,
+ p->id);
+ return -EDQUOT;
+ }
+
+ *p = (struct opdl_port){0}; /* zero entire structure */
+ p->id = port_id;
+ p->opdl = device;
+ p->queue_id = OPDL_INVALID_QID;
+ p->external_qid = OPDL_INVALID_QID;
+ dev->data->ports[port_id] = p;
+ rte_smp_wmb();
+ p->configured = 1;
+ device->nb_ports++;
+ return 0;
+}
+
+static void
+opdl_port_release(void *port)
+{
+ struct opdl_port *p = (void *)port;
+
+ if (p == NULL ||
+ p->opdl->data->dev_started) {
+ return;
+ }
+
+ p->configured = 0;
+ p->initialized = 0;
+}
+
+static void
+opdl_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold = MAX_OPDL_CONS_Q_DEPTH;
+ port_conf->dequeue_depth = MAX_OPDL_CONS_Q_DEPTH;
+ port_conf->enqueue_depth = MAX_OPDL_CONS_Q_DEPTH;
+}
+
+static int
+opdl_queue_setup(struct rte_eventdev *dev,
+ uint8_t queue_id,
+ const struct rte_event_queue_conf *conf)
+{
+ enum queue_type type;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ /* Extra sanity check, probably not needed */
+ if (queue_id == OPDL_INVALID_QID) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Invalid queue id %u requested\n",
+ dev->data->dev_id,
+ queue_id);
+ return -EINVAL;
+ }
+
+ if (device->nb_q_md > device->max_queue_nb) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Max number of queues %u exceeded by request %u\n",
+ dev->data->dev_id,
+ device->max_queue_nb,
+ device->nb_q_md);
+ return -EINVAL;
+ }
+
+ if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
+ & conf->event_queue_cfg) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "QUEUE_CFG_ALL_TYPES not supported\n",
+ dev->data->dev_id);
+ return -ENOTSUP;
+ } else if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK
+ & conf->event_queue_cfg) {
+ type = OPDL_Q_TYPE_SINGLE_LINK;
+ } else {
+ switch (conf->schedule_type) {
+ case RTE_SCHED_TYPE_ORDERED:
+ type = OPDL_Q_TYPE_ORDERED;
+ break;
+ case RTE_SCHED_TYPE_ATOMIC:
+ type = OPDL_Q_TYPE_ATOMIC;
+ break;
+ case RTE_SCHED_TYPE_PARALLEL:
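+ /* No dedicated parallel queue type in OPDL;
+  * parallel queues are handled as ordered.
+  */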
+ type = OPDL_Q_TYPE_ORDERED;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Unknown queue type %d requested\n",
+ dev->data->dev_id,
+ conf->event_queue_cfg);
+ return -EINVAL;
+ }
+ }
+ /* Check if queue id has been setup already */
+ uint32_t i;
+ for (i = 0; i < device->nb_q_md; i++) {
+ if (device->q_md[i].ext_id == queue_id) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "queue id %u already setup\n",
+ dev->data->dev_id,
+ queue_id);
+ return -EINVAL;
+ }
+ }
+
+ device->q_md[device->nb_q_md].ext_id = queue_id;
+ device->q_md[device->nb_q_md].type = type;
+ device->q_md[device->nb_q_md].setup = 1;
+ device->nb_q_md++;
+
+ return 1;
+}
+
+static void
+opdl_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ RTE_SET_USED(queue_id);
+
+ if (device->data->dev_started)
+ return;
+}
+
+static void
+opdl_queue_def_conf(struct rte_eventdev *dev,
+ uint8_t queue_id,
+ struct rte_event_queue_conf *conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ static const struct rte_event_queue_conf default_conf = {
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1,
+ .event_queue_cfg = 0,
+ .schedule_type = RTE_SCHED_TYPE_ORDERED,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ };
+
+ *conf = default_conf;
+}
+
+
+static int
+opdl_dev_configure(const struct rte_eventdev *dev)
+{
+ struct opdl_evdev *opdl = opdl_pmd_priv(dev);
+ const struct rte_eventdev_data *data = dev->data;
+ const struct rte_event_dev_config *conf = &data->dev_conf;
+
+ opdl->max_queue_nb = conf->nb_event_queues;
+ opdl->max_port_nb = conf->nb_event_ports;
+ opdl->nb_events_limit = conf->nb_events_limit;
+
+ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "DEQUEUE_TIMEOUT not supported\n",
+ dev->data->dev_id);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+static void
+opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
+{
+ RTE_SET_USED(dev);
+
+ static const struct rte_event_dev_info evdev_opdl_info = {
+ .driver_name = OPDL_PMD_NAME,
+ .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
+ .max_event_queue_flows = OPDL_QID_NUM_FIDS,
+ .max_event_queue_priority_levels = OPDL_Q_PRIORITY_MAX,
+ .max_event_priority_levels = OPDL_IQS_MAX,
+ .max_event_ports = OPDL_PORTS_MAX,
+ .max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
+ .max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
+ .max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
+ .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
+ };
+
+ *info = evdev_opdl_info;
+}
+
+static void
+opdl_dump(struct rte_eventdev *dev, FILE *f)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return;
+
+ fprintf(f,
+ "\n\n -- RING STATISTICS --\n");
+ uint32_t i;
+ for (i = 0; i < device->nb_opdls; i++)
+ opdl_ring_dump(device->opdl[i], f);
+
+ fprintf(f,
+ "\n\n -- PORT STATISTICS --\n"
+ "Type Port Index Port Id Queue Id Av. Req Size "
+ "Av. Grant Size Av. Cycles PP"
+ " Empty DEQs Non Empty DEQs Pkts Processed\n");
+
+ for (i = 0; i < device->max_port_nb; i++) {
+ char queue_id[64];
+ char total_cyc[64];
+ const char *p_type;
+
+ uint64_t cne, cpg;
+ struct opdl_port *port = &device->ports[i];
+
+ if (port->initialized) {
+ cne = port->port_stat[claim_non_empty];
+ cpg = port->port_stat[claim_pkts_granted];
+ if (port->p_type == OPDL_REGULAR_PORT)
+ p_type = "REG";
+ else if (port->p_type == OPDL_PURE_RX_PORT)
+ p_type = " RX";
+ else if (port->p_type == OPDL_PURE_TX_PORT)
+ p_type = " TX";
+ else if (port->p_type == OPDL_ASYNC_PORT)
+ p_type = "SYNC";
+ else
+ p_type = "????";
+
+ sprintf(queue_id, "%02u", port->external_qid);
+ if (port->p_type == OPDL_REGULAR_PORT ||
+ port->p_type == OPDL_ASYNC_PORT)
+ sprintf(total_cyc,
+ " %'16"PRIu64"",
+ (cpg != 0 ?
+ port->port_stat[total_cycles] / cpg
+ : 0));
+ else
+ sprintf(total_cyc,
+ " ----");
+ fprintf(f,
+ "%4s %10u %8u %9s %'16"PRIu64" %'16"PRIu64" %s "
+ "%'16"PRIu64" %'16"PRIu64" %'16"PRIu64"\n",
+ p_type,
+ i,
+ port->id,
+ (port->external_qid == OPDL_INVALID_QID ? "---"
+ : queue_id),
+ (cne != 0 ?
+ port->port_stat[claim_pkts_requested] / cne
+ : 0),
+ (cne != 0 ?
+ port->port_stat[claim_pkts_granted] / cne
+ : 0),
+ total_cyc,
+ port->port_stat[claim_empty],
+ port->port_stat[claim_non_empty],
+ port->port_stat[claim_pkts_granted]);
+ }
+ }
+ fprintf(f, "\n");
+}
+
+
+static void
+opdl_stop(struct rte_eventdev *dev)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ opdl_xstats_uninit(dev);
+
+ destroy_queues_and_rings(dev);
+
+ device->started = 0;
+
+ rte_smp_wmb();
+}
+
+static int
+opdl_start(struct rte_eventdev *dev)
+{
+ int err = 0;
+
+ if (!err)
+ err = create_queues_and_rings(dev);
+
+ if (!err)
+ err = assign_internal_queue_ids(dev);
+
+ if (!err)
+ err = initialise_queue_zero_ports(dev);
+
+ if (!err)
+ err = initialise_all_other_ports(dev);
+
+ if (!err)
+ err = check_queues_linked(dev);
+
+ if (!err)
+ err = opdl_add_event_handlers(dev);
+
+ if (!err)
+ err = build_all_dependencies(dev);
+
+ if (!err) {
+ opdl_xstats_init(dev);
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
+ "SUCCESS : Created %u total queues (%u ex, %u in),"
+ " %u opdls, %u event_dev ports, %u input ports",
+ opdl_pmd_dev_id(device),
+ device->nb_queues,
+ (device->nb_queues - device->nb_opdls),
+ device->nb_opdls,
+ device->nb_opdls,
+ device->nb_ports,
+ device->queue[0].nb_ports);
+ } else
+ opdl_stop(dev);
+
+ return err;
+}
+
+static int
+opdl_close(struct rte_eventdev *dev)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ uint32_t i;
+
+ for (i = 0; i < device->max_port_nb; i++) {
+ memset(&device->ports[i],
+ 0,
+ sizeof(struct opdl_port));
+ }
+
+ memset(&device->s_md,
+ 0x0,
+ sizeof(struct opdl_stage_meta_data)*OPDL_PORTS_MAX);
+
+ memset(&device->q_md,
+ 0xFF,
+ sizeof(struct opdl_queue_meta_data)*OPDL_MAX_QUEUES);
+
+
+ memset(device->q_map_ex_to_in,
+ 0,
+ sizeof(uint8_t)*OPDL_INVALID_QID);
+
+ opdl_xstats_uninit(dev);
+
+ device->max_port_nb = 0;
+ device->max_queue_nb = 0;
+ device->nb_opdls = 0;
+ device->nb_queues = 0;
+ device->nb_ports = 0;
+ device->nb_q_md = 0;
+
+ dev->data->nb_queues = 0;
+ dev->data->nb_ports = 0;
+
+ return 0;
+}
+
+static int
+assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *socket_id = opaque;
+ *socket_id = atoi(value);
+ if (*socket_id >= RTE_MAX_NUMA_NODES)
+ return -1;
+ return 0;
+}
+
+static int
+set_do_validation(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *do_val = opaque;
+ *do_val = atoi(value);
+ if (*do_val != 0)
+ *do_val = 1;
+
+ return 0;
+}
+
+static int
+set_do_test(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *do_test = opaque;
+
+ *do_test = atoi(value);
+
+ if (*do_test != 0)
+ *do_test = 1;
+ return 0;
+}
+
+static int
+opdl_probe(struct rte_vdev_device *vdev)
+{
+ static const struct rte_eventdev_ops evdev_opdl_ops = {
+ .dev_configure = opdl_dev_configure,
+ .dev_infos_get = opdl_info_get,
+ .dev_close = opdl_close,
+ .dev_start = opdl_start,
+ .dev_stop = opdl_stop,
+ .dump = opdl_dump,
+
+ .queue_def_conf = opdl_queue_def_conf,
+ .queue_setup = opdl_queue_setup,
+ .queue_release = opdl_queue_release,
+ .port_def_conf = opdl_port_def_conf,
+ .port_setup = opdl_port_setup,
+ .port_release = opdl_port_release,
+ .port_link = opdl_port_link,
+ .port_unlink = opdl_port_unlink,
+
+
+ .xstats_get = opdl_xstats_get,
+ .xstats_get_names = opdl_xstats_get_names,
+ .xstats_get_by_name = opdl_xstats_get_by_name,
+ .xstats_reset = opdl_xstats_reset,
+ };
+
+ static const char *const args[] = {
+ NUMA_NODE_ARG,
+ DO_VALIDATION_ARG,
+ DO_TEST_ARG,
+ NULL
+ };
+ const char *name;
+ const char *params;
+ struct rte_eventdev *dev;
+ struct opdl_evdev *opdl;
+ int socket_id = rte_socket_id();
+ int do_validation = 0;
+ int do_test = 0;
+ int str_len;
+ int test_result = 0;
+
+ name = rte_vdev_device_name(vdev);
+ params = rte_vdev_device_args(vdev);
+ if (params != NULL && params[0] != '\0') {
+ struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+ if (!kvlist) {
+ PMD_DRV_LOG(INFO,
+ "Ignoring unsupported parameters when creating device '%s'\n",
+ name);
+ } else {
+ int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
+ assign_numa_node, &socket_id);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "%s: Error parsing numa node parameter",
+ name);
+
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ ret = rte_kvargs_process(kvlist, DO_VALIDATION_ARG,
+ set_do_validation, &do_validation);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "%s: Error parsing do validation parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ ret = rte_kvargs_process(kvlist, DO_TEST_ARG,
+ set_do_test, &do_test);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "%s: Error parsing do test parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ rte_kvargs_free(kvlist);
+ }
+ }
+ dev = rte_event_pmd_vdev_init(name,
+ sizeof(struct opdl_evdev), socket_id);
+
+ if (dev == NULL) {
+ PMD_DRV_LOG(ERR, "eventdev vdev init() failed");
+ return -EFAULT;
+ }
+
+ PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
+		"Success - creating eventdev device %s, numa_node:[%d],"
+		" do_validation:[%s], self_test:[%s]",
+ dev->data->dev_id,
+ name,
+ socket_id,
+ (do_validation ? "true" : "false"),
+ (do_test ? "true" : "false"));
+
+ dev->dev_ops = &evdev_opdl_ops;
+
+ dev->enqueue = opdl_event_enqueue;
+ dev->enqueue_burst = opdl_event_enqueue_burst;
+ dev->enqueue_new_burst = opdl_event_enqueue_burst;
+ dev->enqueue_forward_burst = opdl_event_enqueue_burst;
+ dev->dequeue = opdl_event_dequeue;
+ dev->dequeue_burst = opdl_event_dequeue_burst;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ opdl = dev->data->dev_private;
+ opdl->data = dev->data;
+ opdl->socket = socket_id;
+ opdl->do_validation = do_validation;
+ opdl->do_test = do_test;
+	/* Bounded copy keeps service_name NUL-terminated */
+	snprintf(opdl->service_name, OPDL_PMD_NAME_MAX, "%s", name);
+
+ if (do_test == 1)
+ test_result = opdl_selftest();
+
+ return test_result;
+}
+
+static int
+opdl_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+	PMD_DRV_LOG(INFO, "Closing eventdev opdl device %s", name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver evdev_opdl_pmd_drv = {
+ .probe = opdl_probe,
+ .remove = opdl_remove
+};
+
+RTE_INIT(opdl_init_log);
+
+static void
+opdl_init_log(void)
+{
+ opdl_logtype_driver = rte_log_register("pmd.event.opdl.driver");
+ if (opdl_logtype_driver >= 0)
+ rte_log_set_level(opdl_logtype_driver, RTE_LOG_INFO);
+}
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OPDL_PMD, evdev_opdl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(event_opdl, NUMA_NODE_ARG "=<int>"
+ DO_VALIDATION_ARG "=<int>" DO_TEST_ARG "=<int>");
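+
+/*
+ * Illustrative usage (a sketch, not part of the driver): an instance of
+ * this PMD can be created from the EAL command line with its optional
+ * parameters, e.g.
+ *
+ *   --vdev="event_opdl0,numa_node=0,do_validation=1,do_test=0"
+ *
+ * The parameter values above are assumptions for illustration only.
+ */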
diff --git a/drivers/event/opdl/opdl_evdev.h b/drivers/event/opdl/opdl_evdev.h
new file mode 100644
index 00000000..610b58b3
--- /dev/null
+++ b/drivers/event/opdl/opdl_evdev.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _OPDL_EVDEV_H_
+#define _OPDL_EVDEV_H_
+
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd_vdev.h>
+#include <rte_atomic.h>
+#include "opdl_ring.h"
+
+#define OPDL_QID_NUM_FIDS 1024
+#define OPDL_IQS_MAX 1
+#define OPDL_Q_PRIORITY_MAX 1
+#define OPDL_PORTS_MAX 64
+#define MAX_OPDL_CONS_Q_DEPTH 128
+/* Total number of in-flight events an opdl device can hold */
+#define OPDL_INFLIGHT_EVENTS_TOTAL 4096
+/* allow for lots of over-provisioning */
+#define OPDL_FRAGMENTS_MAX 1
+
+/* report dequeue burst sizes in buckets */
+#define OPDL_DEQ_STAT_BUCKET_SHIFT 2
+/* how many packets pulled from port by sched */
+#define SCHED_DEQUEUE_BURST_SIZE 32
+
+/* size of our history list */
+#define OPDL_PORT_HIST_LIST (MAX_OPDL_PROD_Q_DEPTH)
+
+/* how many data points used for average stats */
+#define NUM_SAMPLES 64
+
+#define EVENTDEV_NAME_OPDL_PMD event_opdl
+#define OPDL_PMD_NAME RTE_STR(event_opdl)
+#define OPDL_PMD_NAME_MAX 64
+
+#define OPDL_INVALID_QID 255
+
+#define OPDL_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)
+
+#define OPDL_NUM_POLL_BUCKETS \
+ (MAX_OPDL_CONS_Q_DEPTH >> OPDL_DEQ_STAT_BUCKET_SHIFT)
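+/* With the defaults above this is 128 >> 2 = 32 poll buckets */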
+
+enum {
+ QE_FLAG_VALID_SHIFT = 0,
+ QE_FLAG_COMPLETE_SHIFT,
+ QE_FLAG_NOT_EOP_SHIFT,
+ _QE_FLAG_COUNT
+};
+
+enum port_type {
+ OPDL_INVALID_PORT = 0,
+ OPDL_REGULAR_PORT = 1,
+ OPDL_PURE_RX_PORT,
+ OPDL_PURE_TX_PORT,
+ OPDL_ASYNC_PORT
+};
+
+enum queue_type {
+ OPDL_Q_TYPE_INVALID = 0,
+ OPDL_Q_TYPE_SINGLE_LINK = 1,
+ OPDL_Q_TYPE_ATOMIC,
+ OPDL_Q_TYPE_ORDERED
+};
+
+enum queue_pos {
+ OPDL_Q_POS_START = 0,
+ OPDL_Q_POS_MIDDLE,
+ OPDL_Q_POS_END
+};
+
+#define QE_FLAG_VALID (1 << QE_FLAG_VALID_SHIFT) /* for NEW FWD, FRAG */
+#define QE_FLAG_COMPLETE (1 << QE_FLAG_COMPLETE_SHIFT) /* set for FWD, DROP */
+#define QE_FLAG_NOT_EOP (1 << QE_FLAG_NOT_EOP_SHIFT) /* set for FRAG only */
+
+static const uint8_t opdl_qe_flag_map[] = {
+ QE_FLAG_VALID /* NEW Event */,
+ QE_FLAG_VALID | QE_FLAG_COMPLETE /* FWD Event */,
+ QE_FLAG_COMPLETE /* RELEASE Event */,
+
+ /* Values which can be used for future support for partial
+ * events, i.e. where one event comes back to the scheduler
+ * as multiple which need to be tracked together
+ */
+ QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
+};
+
+
+enum port_xstat_name {
+ claim_pkts_requested = 0,
+ claim_pkts_granted,
+ claim_non_empty,
+ claim_empty,
+ total_cycles,
+ max_num_port_xstat
+};
+
+#define OPDL_MAX_PORT_XSTAT_NUM (OPDL_PORTS_MAX * max_num_port_xstat)
+
+struct opdl_port;
+
+typedef uint16_t (*opdl_enq_operation)(struct opdl_port *port,
+ const struct rte_event ev[],
+ uint16_t num);
+
+typedef uint16_t (*opdl_deq_operation)(struct opdl_port *port,
+ struct rte_event ev[],
+ uint16_t num);
+
+struct opdl_evdev;
+
+struct opdl_stage_meta_data {
+ uint32_t num_claimed; /* number of entries claimed by this stage */
+ uint32_t burst_sz; /* Port claim burst size */
+};
+
+struct opdl_port {
+
+ /* back pointer */
+ struct opdl_evdev *opdl;
+
+ /* enq handler & stage instance */
+ opdl_enq_operation enq;
+ struct opdl_stage *enq_stage_inst;
+
+ /* deq handler & stage instance */
+ opdl_deq_operation deq;
+ struct opdl_stage *deq_stage_inst;
+
+ /* port id has correctly been set */
+ uint8_t configured;
+
+ /* set when the port is initialized */
+ uint8_t initialized;
+
+ /* A numeric ID for the port */
+ uint8_t id;
+
+ /* Space for claimed entries */
+ struct rte_event *entries[MAX_OPDL_CONS_Q_DEPTH];
+
+	/* RX/REGULAR/TX/ASYNC - determined by position in queue */
+ enum port_type p_type;
+
+	/* set if this port uses atomic claims */
+ bool atomic_claim;
+
+	/* Queue linked to this port - internal queue id */
+	uint8_t queue_id;
+
+	/* Queue linked to this port - external queue id */
+	uint8_t external_qid;
+
+	/* Next queue linked to this port - external queue id */
+	uint8_t next_external_qid;
+
+	/* number of instances of this stage */
+	uint32_t num_instance;
+
+	/* instance ID of this stage */
+	uint32_t instance_id;
+
+ /* track packets in and out of this port */
+ uint64_t port_stat[max_num_port_xstat];
+ uint64_t start_cycles;
+};
+
+struct opdl_queue_meta_data {
+ uint8_t ext_id;
+ enum queue_type type;
+ int8_t setup;
+};
+
+struct opdl_xstats_entry {
+ struct rte_event_dev_xstats_name stat;
+ unsigned int id;
+ uint64_t *value;
+};
+
+struct opdl_queue {
+
+ /* Opdl ring this queue is associated with */
+ uint32_t opdl_id;
+
+ /* type and position have correctly been set */
+ uint8_t configured;
+
+	/* ports have been linked to this queue */
+ uint8_t initialized;
+
+	/* type of this queue (Atomic, Ordered, Parallel, Direct) */
+ enum queue_type q_type;
+
+ /* position of queue (START, MIDDLE, END) */
+ enum queue_pos q_pos;
+
+ /* external queue id. It is mapped to the queue position */
+ uint8_t external_qid;
+
+ struct opdl_port *ports[OPDL_PORTS_MAX];
+ uint32_t nb_ports;
+
+ /* priority, reserved for future */
+ uint8_t priority;
+};
+
+
+#define OPDL_TUR_PER_DEV 12
+
+/* PMD needs an extra queue per Opdl */
+#define OPDL_MAX_QUEUES (RTE_EVENT_MAX_QUEUES_PER_DEV - OPDL_TUR_PER_DEV)
+
+
+struct opdl_evdev {
+ struct rte_eventdev_data *data;
+
+ uint8_t started;
+
+ /* Max number of ports and queues*/
+ uint32_t max_port_nb;
+ uint32_t max_queue_nb;
+
+ /* slots in the opdl ring */
+ uint32_t nb_events_limit;
+
+ /*
+ * Array holding all opdl for this device
+ */
+ struct opdl_ring *opdl[OPDL_TUR_PER_DEV];
+ uint32_t nb_opdls;
+
+ struct opdl_queue_meta_data q_md[OPDL_MAX_QUEUES];
+ uint32_t nb_q_md;
+
+ /* Internal queues - one per logical queue */
+ struct opdl_queue
+ queue[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;
+
+ uint32_t nb_queues;
+
+ struct opdl_stage_meta_data s_md[OPDL_PORTS_MAX];
+
+ /* Contains all ports - load balanced and directed */
+ struct opdl_port ports[OPDL_PORTS_MAX] __rte_cache_aligned;
+ uint32_t nb_ports;
+
+ uint8_t q_map_ex_to_in[OPDL_INVALID_QID];
+
+ /* Stats */
+ struct opdl_xstats_entry port_xstat[OPDL_MAX_PORT_XSTAT_NUM];
+
+ char service_name[OPDL_PMD_NAME_MAX];
+ int socket;
+ int do_validation;
+ int do_test;
+};
+
+
+static inline struct opdl_evdev *
+opdl_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+static inline uint8_t
+opdl_pmd_dev_id(const struct opdl_evdev *opdl)
+{
+ return opdl->data->dev_id;
+}
+
+static inline const struct opdl_evdev *
+opdl_pmd_priv_const(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+uint16_t opdl_event_enqueue(void *port, const struct rte_event *ev);
+uint16_t opdl_event_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t num);
+
+uint16_t opdl_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
+uint16_t opdl_event_dequeue_burst(void *port, struct rte_event *ev,
+ uint16_t num, uint64_t wait);
+void opdl_event_schedule(struct rte_eventdev *dev);
+
+void opdl_xstats_init(struct rte_eventdev *dev);
+int opdl_xstats_uninit(struct rte_eventdev *dev);
+int opdl_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size);
+int opdl_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n);
+uint64_t opdl_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id);
+int opdl_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
+
+int opdl_add_event_handlers(struct rte_eventdev *dev);
+int build_all_dependencies(struct rte_eventdev *dev);
+int check_queues_linked(struct rte_eventdev *dev);
+int create_queues_and_rings(struct rte_eventdev *dev);
+int initialise_all_other_ports(struct rte_eventdev *dev);
+int initialise_queue_zero_ports(struct rte_eventdev *dev);
+int assign_internal_queue_ids(struct rte_eventdev *dev);
+void destroy_queues_and_rings(struct rte_eventdev *dev);
+int opdl_selftest(void);
+
+#endif /* _OPDL_EVDEV_H_ */
diff --git a/drivers/event/opdl/opdl_evdev_init.c b/drivers/event/opdl/opdl_evdev_init.c
new file mode 100644
index 00000000..1454de53
--- /dev/null
+++ b/drivers/event/opdl/opdl_evdev_init.c
@@ -0,0 +1,940 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+#include <rte_memzone.h>
+
+#include "opdl_evdev.h"
+#include "opdl_ring.h"
+#include "opdl_log.h"
+
+
+static __rte_always_inline uint32_t
+enqueue_check(struct opdl_port *p,
+ const struct rte_event ev[],
+ uint16_t num,
+ uint16_t num_events)
+{
+ uint16_t i;
+
+ if (p->opdl->do_validation) {
+
+ for (i = 0; i < num; i++) {
+ if (ev[i].queue_id != p->next_external_qid) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "ERROR - port:[%u] - event wants"
+ " to enq to q_id[%u],"
+ " but should be [%u]",
+ opdl_pmd_dev_id(p->opdl),
+ p->id,
+ ev[i].queue_id,
+ p->next_external_qid);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+ }
+
+ /* Stats */
+ if (p->p_type == OPDL_PURE_RX_PORT ||
+ p->p_type == OPDL_ASYNC_PORT) {
+ if (num_events) {
+ p->port_stat[claim_pkts_requested] += num;
+ p->port_stat[claim_pkts_granted] += num_events;
+ p->port_stat[claim_non_empty]++;
+ p->start_cycles = rte_rdtsc();
+ } else {
+ p->port_stat[claim_empty]++;
+ p->start_cycles = 0;
+ }
+ } else {
+ if (p->start_cycles) {
+ uint64_t end_cycles = rte_rdtsc();
+ p->port_stat[total_cycles] +=
+ end_cycles - p->start_cycles;
+ }
+ }
+ } else {
+ if (num > 0 &&
+ ev[0].queue_id != p->next_external_qid) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+ }
+
+ return num;
+}
+
+static __rte_always_inline void
+update_on_dequeue(struct opdl_port *p,
+ struct rte_event ev[],
+ uint16_t num,
+ uint16_t num_events)
+{
+ if (p->opdl->do_validation) {
+		uint16_t i;
+ for (i = 0; i < num; i++)
+ ev[i].queue_id =
+ p->opdl->queue[p->queue_id].external_qid;
+
+ /* Stats */
+ if (num_events) {
+ p->port_stat[claim_pkts_requested] += num;
+ p->port_stat[claim_pkts_granted] += num_events;
+ p->port_stat[claim_non_empty]++;
+ p->start_cycles = rte_rdtsc();
+ } else {
+ p->port_stat[claim_empty]++;
+ p->start_cycles = 0;
+ }
+ } else {
+ if (num > 0)
+ ev[0].queue_id =
+ p->opdl->queue[p->queue_id].external_qid;
+ }
+}
+
+
+/*
+ * Error RX enqueue: assigned to ports that cannot enqueue; always fails
+ * and sets rte_errno to -ENOSPC.
+ */
+
+static uint16_t
+opdl_rx_error_enqueue(struct opdl_port *p,
+ const struct rte_event ev[],
+ uint16_t num)
+{
+ RTE_SET_USED(p);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(num);
+
+ rte_errno = -ENOSPC;
+
+ return 0;
+}
+
+/*
+ * RX enqueue:
+ *
+ * This function handles enqueue for a single input stage_inst with
+ * threadsafe disabled or enabled, e.g. one thread using a stage_inst or
+ * multiple threads sharing a stage_inst.
+ */
+
+static uint16_t
+opdl_rx_enqueue(struct opdl_port *p,
+ const struct rte_event ev[],
+ uint16_t num)
+{
+ uint16_t enqueued = 0;
+
+ enqueued = opdl_ring_input(opdl_stage_get_opdl_ring(p->enq_stage_inst),
+ ev,
+ num,
+ false);
+ if (!enqueue_check(p, ev, num, enqueued))
+ return 0;
+
+ if (enqueued < num)
+ rte_errno = -ENOSPC;
+
+ return enqueued;
+}
+
+/*
+ * Error TX handler: assigned to ports that cannot dequeue; always fails
+ * and sets rte_errno to -ENOSPC.
+ */
+
+static uint16_t
+opdl_tx_error_dequeue(struct opdl_port *p,
+ struct rte_event ev[],
+ uint16_t num)
+{
+ RTE_SET_USED(p);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(num);
+
+ rte_errno = -ENOSPC;
+
+ return 0;
+}
+
+/*
+ * TX single threaded claim
+ *
+ * This function handles dequeue for a single worker stage_inst with
+ * threadsafe disabled, e.g. one thread using a stage_inst.
+ */
+
+static uint16_t
+opdl_tx_dequeue_single_thread(struct opdl_port *p,
+ struct rte_event ev[],
+ uint16_t num)
+{
+ uint16_t returned;
+
+ struct opdl_ring *ring;
+
+ ring = opdl_stage_get_opdl_ring(p->deq_stage_inst);
+
+ returned = opdl_ring_copy_to_burst(ring,
+ p->deq_stage_inst,
+ ev,
+ num,
+ false);
+
+ update_on_dequeue(p, ev, num, returned);
+
+ return returned;
+}
+
+/*
+ * TX multi threaded claim
+ *
+ * This function handles dequeue for multiple worker stage_inst with
+ * threadsafe disabled, e.g. multiple stage_inst, each with its own instance.
+ */
+
+static uint16_t
+opdl_tx_dequeue_multi_inst(struct opdl_port *p,
+ struct rte_event ev[],
+ uint16_t num)
+{
+ uint32_t num_events = 0;
+
+ num_events = opdl_stage_claim(p->deq_stage_inst,
+ (void *)ev,
+ num,
+ NULL,
+ false,
+ false);
+
+ update_on_dequeue(p, ev, num, num_events);
+
+ return opdl_stage_disclaim(p->deq_stage_inst, num_events, false);
+}
+
+
+/*
+ * Worker thread claim
+ *
+ */
+
+static uint16_t
+opdl_claim(struct opdl_port *p, struct rte_event ev[], uint16_t num)
+{
+ uint32_t num_events = 0;
+
+ if (unlikely(num > MAX_OPDL_CONS_Q_DEPTH)) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+			"Attempt to dequeue more events than port (%d) max",
+ opdl_pmd_dev_id(p->opdl),
+ p->id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+	num_events = opdl_stage_claim(p->deq_stage_inst,
+			(void *)ev,
+			num,
+			NULL,
+			false,
+			p->atomic_claim);
+
+ update_on_dequeue(p, ev, num, num_events);
+
+ return num_events;
+}
+
+/*
+ * Worker thread disclaim
+ */
+
+static uint16_t
+opdl_disclaim(struct opdl_port *p, const struct rte_event ev[], uint16_t num)
+{
+ uint16_t enqueued = 0;
+
+ uint32_t i = 0;
+
+ for (i = 0; i < num; i++)
+ opdl_ring_cas_slot(p->enq_stage_inst, &ev[i],
+ i, p->atomic_claim);
+
+ enqueued = opdl_stage_disclaim(p->enq_stage_inst,
+ num,
+ false);
+
+ return enqueue_check(p, ev, num, enqueued);
+}
+
+static __rte_always_inline struct opdl_stage *
+stage_for_port(struct opdl_queue *q, unsigned int i)
+{
+ if (q->q_pos == OPDL_Q_POS_START || q->q_pos == OPDL_Q_POS_MIDDLE)
+ return q->ports[i]->enq_stage_inst;
+ else
+ return q->ports[i]->deq_stage_inst;
+}
+
+static int opdl_add_deps(struct opdl_evdev *device,
+ int q_id,
+ int deps_q_id)
+{
+ unsigned int i, j;
+ int status;
+ struct opdl_ring *ring;
+ struct opdl_queue *queue = &device->queue[q_id];
+ struct opdl_queue *queue_deps = &device->queue[deps_q_id];
+ struct opdl_stage *dep_stages[OPDL_PORTS_MAX];
+
+ /* sanity check that all stages are for same opdl ring */
+ for (i = 0; i < queue->nb_ports; i++) {
+ struct opdl_ring *r =
+ opdl_stage_get_opdl_ring(stage_for_port(queue, i));
+ for (j = 0; j < queue_deps->nb_ports; j++) {
+ struct opdl_ring *rj =
+ opdl_stage_get_opdl_ring(
+ stage_for_port(queue_deps, j));
+ if (r != rj) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "Stages and dependents"
+ " are not for same opdl ring",
+ opdl_pmd_dev_id(device));
+ uint32_t k;
+ for (k = 0; k < device->nb_opdls; k++) {
+ opdl_ring_dump(device->opdl[k],
+ stdout);
+ }
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Gather all stages instance in deps */
+ for (i = 0; i < queue_deps->nb_ports; i++)
+ dep_stages[i] = stage_for_port(queue_deps, i);
+
+ /* Add all deps for each port->stage_inst in this queue */
+ for (i = 0; i < queue->nb_ports; i++) {
+
+ ring = opdl_stage_get_opdl_ring(stage_for_port(queue, i));
+
+ status = opdl_stage_deps_add(ring,
+ stage_for_port(queue, i),
+ queue->ports[i]->num_instance,
+ queue->ports[i]->instance_id,
+ dep_stages,
+ queue_deps->nb_ports);
+ if (status < 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+opdl_add_event_handlers(struct rte_eventdev *dev)
+{
+ int err = 0;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ unsigned int i;
+
+ for (i = 0; i < device->max_port_nb; i++) {
+
+ struct opdl_port *port = &device->ports[i];
+
+ if (port->configured) {
+ if (port->p_type == OPDL_PURE_RX_PORT) {
+ port->enq = opdl_rx_enqueue;
+ port->deq = opdl_tx_error_dequeue;
+
+ } else if (port->p_type == OPDL_PURE_TX_PORT) {
+
+ port->enq = opdl_rx_error_enqueue;
+
+ if (port->num_instance == 1)
+ port->deq =
+ opdl_tx_dequeue_single_thread;
+ else
+ port->deq = opdl_tx_dequeue_multi_inst;
+
+ } else if (port->p_type == OPDL_REGULAR_PORT) {
+
+ port->enq = opdl_disclaim;
+ port->deq = opdl_claim;
+
+ } else if (port->p_type == OPDL_ASYNC_PORT) {
+
+ port->enq = opdl_rx_enqueue;
+
+ /* Always single instance */
+ port->deq = opdl_tx_dequeue_single_thread;
+ } else {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "port:[%u] has invalid port type - ",
+ opdl_pmd_dev_id(port->opdl),
+ port->id);
+ err = -EINVAL;
+ break;
+ }
+ port->initialized = 1;
+ }
+ }
+
+ if (!err)
+ fprintf(stdout, "Success - enqueue/dequeue handler(s) added\n");
+ return err;
+}
+
+int
+build_all_dependencies(struct rte_eventdev *dev)
+{
+
+ int err = 0;
+ unsigned int i;
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ uint8_t start_qid = 0;
+
+ for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+ struct opdl_queue *queue = &device->queue[i];
+ if (!queue->initialized)
+ break;
+
+ if (queue->q_pos == OPDL_Q_POS_START) {
+ start_qid = i;
+ continue;
+ }
+
+ if (queue->q_pos == OPDL_Q_POS_MIDDLE) {
+ err = opdl_add_deps(device, i, i-1);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "dependency addition for queue:[%u] - FAILED",
+ dev->data->dev_id,
+ queue->external_qid);
+ break;
+ }
+ }
+
+ if (queue->q_pos == OPDL_Q_POS_END) {
+ /* Add this dependency */
+ err = opdl_add_deps(device, i, i-1);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "dependency addition for queue:[%u] - FAILED",
+ dev->data->dev_id,
+ queue->external_qid);
+ break;
+ }
+ /* Add dependency for rx on tx */
+ err = opdl_add_deps(device, start_qid, i);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "dependency addition for queue:[%u] - FAILED",
+ dev->data->dev_id,
+ queue->external_qid);
+ break;
+ }
+ }
+ }
+
+ if (!err)
+ fprintf(stdout, "Success - dependencies built\n");
+
+ return err;
+}
+
+int
+check_queues_linked(struct rte_eventdev *dev)
+{
+
+ int err = 0;
+ unsigned int i;
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ uint32_t nb_iq = 0;
+
+ for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+ struct opdl_queue *queue = &device->queue[i];
+
+ if (!queue->initialized)
+ break;
+
+ if (queue->external_qid == OPDL_INVALID_QID)
+ nb_iq++;
+
+ if (queue->nb_ports == 0) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "queue:[%u] has no associated ports",
+ dev->data->dev_id,
+ i);
+ err = -EINVAL;
+ break;
+ }
+ }
+ if (!err) {
+ if ((i - nb_iq) != device->max_queue_nb) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "%u queues counted but should be %u",
+ dev->data->dev_id,
+ i - nb_iq,
+ device->max_queue_nb);
+ err = -1;
+ }
+
+ }
+ return err;
+}
+
+void
+destroy_queues_and_rings(struct rte_eventdev *dev)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ uint32_t i;
+
+ for (i = 0; i < device->nb_opdls; i++) {
+ if (device->opdl[i])
+ opdl_ring_free(device->opdl[i]);
+ }
+
+ memset(&device->queue,
+ 0,
+ sizeof(struct opdl_queue)
+ * RTE_EVENT_MAX_QUEUES_PER_DEV);
+}
+
+#define OPDL_ID(d) ((d)->nb_opdls - 1)
+
+static __rte_always_inline void
+initialise_queue(struct opdl_evdev *device,
+ enum queue_pos pos,
+ int32_t i)
+{
+ struct opdl_queue *queue = &device->queue[device->nb_queues];
+
+ if (i == -1) {
+ queue->q_type = OPDL_Q_TYPE_ORDERED;
+ queue->external_qid = OPDL_INVALID_QID;
+ } else {
+ queue->q_type = device->q_md[i].type;
+ queue->external_qid = device->q_md[i].ext_id;
+ /* Add ex->in for queues setup */
+ device->q_map_ex_to_in[queue->external_qid] = device->nb_queues;
+ }
+ queue->opdl_id = OPDL_ID(device);
+ queue->q_pos = pos;
+ queue->nb_ports = 0;
+ queue->configured = 1;
+
+ device->nb_queues++;
+}
+
+
+static __rte_always_inline int
+create_opdl(struct opdl_evdev *device)
+{
+ int err = 0;
+
+ char name[RTE_MEMZONE_NAMESIZE];
+
+ snprintf(name, RTE_MEMZONE_NAMESIZE,
+ "%s_%u", device->service_name, device->nb_opdls);
+
+ device->opdl[device->nb_opdls] =
+ opdl_ring_create(name,
+ device->nb_events_limit,
+ sizeof(struct rte_event),
+ device->max_port_nb * 2,
+ device->socket);
+
+ if (!device->opdl[device->nb_opdls]) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "opdl ring %u creation - FAILED",
+ opdl_pmd_dev_id(device),
+ device->nb_opdls);
+ err = -EINVAL;
+ } else {
+ device->nb_opdls++;
+ }
+ return err;
+}
+
+static __rte_always_inline int
+create_link_opdl(struct opdl_evdev *device, uint32_t index)
+{
+
+ int err = 0;
+
+ if (device->q_md[index + 1].type !=
+ OPDL_Q_TYPE_SINGLE_LINK) {
+
+ /* async queue with regular
+ * queue following it
+ */
+
+ /* create a new opdl ring */
+ err = create_opdl(device);
+ if (!err) {
+ /* create an initial
+ * dummy queue for new opdl
+ */
+ initialise_queue(device,
+ OPDL_Q_POS_START,
+ -1);
+ } else {
+ err = -EINVAL;
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "queue %u, two consecutive"
+ " SINGLE_LINK queues, not allowed",
+ opdl_pmd_dev_id(device),
+ index);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+int
+create_queues_and_rings(struct rte_eventdev *dev)
+{
+ int err = 0;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ device->nb_queues = 0;
+
+ if (device->nb_ports != device->max_port_nb) {
+		PMD_DRV_LOG(ERR, "Number of ports set up:%u does not equal max"
+			     " port number:%u for this device",
+ device->nb_ports,
+ device->max_port_nb);
+ err = -1;
+ }
+
+ if (!err) {
+ /* We will have at least one opdl so create it now */
+ err = create_opdl(device);
+ }
+
+ if (!err) {
+
+ /* Create 1st "dummy" queue */
+ initialise_queue(device,
+ OPDL_Q_POS_START,
+ -1);
+
+ uint32_t i;
+ for (i = 0; i < device->nb_q_md; i++) {
+
+ /* Check */
+ if (!device->q_md[i].setup) {
+
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "queue meta data slot %u"
+ " not setup - FAILING",
+ dev->data->dev_id,
+ i);
+ err = -EINVAL;
+ break;
+ } else if (device->q_md[i].type !=
+ OPDL_Q_TYPE_SINGLE_LINK) {
+
+ if (!device->q_md[i + 1].setup) {
+ /* Create a simple ORDERED/ATOMIC
+ * queue at the end
+ */
+ initialise_queue(device,
+ OPDL_Q_POS_END,
+ i);
+
+ } else {
+ /* Create a simple ORDERED/ATOMIC
+ * queue in the middle
+ */
+ initialise_queue(device,
+ OPDL_Q_POS_MIDDLE,
+ i);
+ }
+ } else if (device->q_md[i].type ==
+ OPDL_Q_TYPE_SINGLE_LINK) {
+
+ /* create last queue for this opdl */
+ initialise_queue(device,
+ OPDL_Q_POS_END,
+ i);
+
+ err = create_link_opdl(device, i);
+
+ if (err)
+ break;
+ }
+ }
+ }
+ if (err)
+ destroy_queues_and_rings(dev);
+
+ return err;
+}
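+
+/*
+ * Illustrative layout (a sketch): external queues [ORDERED, ATOMIC,
+ * SINGLE_LINK, ORDERED] yield two opdl rings. Ring 0 holds a dummy START
+ * queue, two MIDDLE queues and an END queue for the SINGLE_LINK; ring 1
+ * holds a second dummy START queue and an END queue for the final
+ * ORDERED queue.
+ */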
+
+
+int
+initialise_all_other_ports(struct rte_eventdev *dev)
+{
+ int err = 0;
+ struct opdl_stage *stage_inst = NULL;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ uint32_t i;
+ for (i = 0; i < device->nb_ports; i++) {
+ struct opdl_port *port = &device->ports[i];
+ struct opdl_queue *queue = &device->queue[port->queue_id];
+
+ if (port->queue_id == 0) {
+ continue;
+ } else if (queue->q_type != OPDL_Q_TYPE_SINGLE_LINK) {
+
+ if (queue->q_pos == OPDL_Q_POS_MIDDLE) {
+
+ /* Regular port with claim/disclaim */
+ stage_inst = opdl_stage_add(
+ device->opdl[queue->opdl_id],
+ false,
+ false);
+ port->deq_stage_inst = stage_inst;
+ port->enq_stage_inst = stage_inst;
+
+ if (queue->q_type == OPDL_Q_TYPE_ATOMIC)
+ port->atomic_claim = true;
+ else
+ port->atomic_claim = false;
+
+ port->p_type = OPDL_REGULAR_PORT;
+
+ /* Add the port to the queue array of ports */
+ queue->ports[queue->nb_ports] = port;
+ port->instance_id = queue->nb_ports;
+ queue->nb_ports++;
+ } else if (queue->q_pos == OPDL_Q_POS_END) {
+
+ /* tx port */
+ stage_inst = opdl_stage_add(
+ device->opdl[queue->opdl_id],
+ false,
+ false);
+ port->deq_stage_inst = stage_inst;
+ port->enq_stage_inst = NULL;
+ port->p_type = OPDL_PURE_TX_PORT;
+
+ /* Add the port to the queue array of ports */
+ queue->ports[queue->nb_ports] = port;
+ port->instance_id = queue->nb_ports;
+ queue->nb_ports++;
+ } else {
+
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					"port:[%u] linked incorrectly"
+					" to a q_pos START/INVALID %u",
+ opdl_pmd_dev_id(port->opdl),
+ port->id,
+ queue->q_pos);
+ err = -EINVAL;
+ break;
+ }
+
+ } else if (queue->q_type == OPDL_Q_TYPE_SINGLE_LINK) {
+
+ port->p_type = OPDL_ASYNC_PORT;
+
+ /* -- tx -- */
+ stage_inst = opdl_stage_add(
+ device->opdl[queue->opdl_id],
+ false,
+ false); /* First stage */
+ port->deq_stage_inst = stage_inst;
+
+ /* Add the port to the queue array of ports */
+ queue->ports[queue->nb_ports] = port;
+ port->instance_id = queue->nb_ports;
+ queue->nb_ports++;
+
+ if (queue->nb_ports > 1) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					"queue:[%u] set up as SINGLE_LINK"
+					" but has more than one port linked",
+ opdl_pmd_dev_id(port->opdl),
+ queue->external_qid);
+ err = -EINVAL;
+ break;
+ }
+
+ /* -- single instance rx for next opdl -- */
+ uint8_t next_qid =
+ device->q_map_ex_to_in[queue->external_qid] + 1;
+ if (next_qid < RTE_EVENT_MAX_QUEUES_PER_DEV &&
+ device->queue[next_qid].configured) {
+
+ /* Remap the queue */
+ queue = &device->queue[next_qid];
+
+ stage_inst = opdl_stage_add(
+ device->opdl[queue->opdl_id],
+ false,
+ true);
+ port->enq_stage_inst = stage_inst;
+
+ /* Add the port to the queue array of ports */
+ queue->ports[queue->nb_ports] = port;
+ port->instance_id = queue->nb_ports;
+ queue->nb_ports++;
+ if (queue->nb_ports > 1) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "dummy queue %u: for "
+ "port %u, "
+ "SINGLE_LINK but has more "
+ "than one port linked",
+ opdl_pmd_dev_id(port->opdl),
+ next_qid,
+ port->id);
+ err = -EINVAL;
+ break;
+ }
+ /* Set this queue to initialized as it is never
+ * referenced by any ports
+ */
+ queue->initialized = 1;
+ }
+ }
+ }
+
+ /* Now that all ports are initialised we need to
+ * setup the last bit of stage md
+ */
+ if (!err) {
+ for (i = 0; i < device->nb_ports; i++) {
+ struct opdl_port *port = &device->ports[i];
+ struct opdl_queue *queue =
+ &device->queue[port->queue_id];
+
+ if (port->configured &&
+ (port->queue_id != OPDL_INVALID_QID)) {
+ if (queue->nb_ports == 0) {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+ "queue:[%u] has no ports"
+ " linked to it",
+ opdl_pmd_dev_id(port->opdl),
+ port->id);
+ err = -EINVAL;
+ break;
+ }
+
+ port->num_instance = queue->nb_ports;
+ port->initialized = 1;
+ queue->initialized = 1;
+ } else {
+ PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
+					"Port:[%u] not configured, invalid"
+					" queue configuration",
+ opdl_pmd_dev_id(port->opdl),
+ port->id);
+ err = -EINVAL;
+ break;
+ }
+ }
+ }
+ return err;
+}
+
+int
+initialise_queue_zero_ports(struct rte_eventdev *dev)
+{
+ int err = 0;
+ uint8_t mt_rx = 0;
+ struct opdl_stage *stage_inst = NULL;
+ struct opdl_queue *queue = NULL;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ /* Assign queue zero and figure out how many Q0 ports we have */
+ uint32_t i;
+ for (i = 0; i < device->nb_ports; i++) {
+ struct opdl_port *port = &device->ports[i];
+ if (port->queue_id == OPDL_INVALID_QID) {
+ port->queue_id = 0;
+ port->external_qid = OPDL_INVALID_QID;
+ port->p_type = OPDL_PURE_RX_PORT;
+ mt_rx++;
+ }
+ }
+
+ /* Create the stage */
+	stage_inst = opdl_stage_add(device->opdl[0],
+			(mt_rx > 1),
+			true);
+ if (stage_inst) {
+
+ /* Assign the new created input stage to all relevant ports */
+ for (i = 0; i < device->nb_ports; i++) {
+ struct opdl_port *port = &device->ports[i];
+ if (port->queue_id == 0) {
+ queue = &device->queue[port->queue_id];
+ port->enq_stage_inst = stage_inst;
+ port->deq_stage_inst = NULL;
+ port->configured = 1;
+ port->initialized = 1;
+
+ queue->ports[queue->nb_ports] = port;
+ port->instance_id = queue->nb_ports;
+ queue->nb_ports++;
+ }
+ }
+ } else {
+ err = -1;
+ }
+ return err;
+}
+
+int
+assign_internal_queue_ids(struct rte_eventdev *dev)
+{
+ int err = 0;
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+ uint32_t i;
+
+ for (i = 0; i < device->nb_ports; i++) {
+ struct opdl_port *port = &device->ports[i];
+ if (port->external_qid != OPDL_INVALID_QID) {
+ port->queue_id =
+ device->q_map_ex_to_in[port->external_qid];
+
+ /* Now do the external_qid of the next queue */
+ struct opdl_queue *queue =
+ &device->queue[port->queue_id];
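+			/* An END queue is followed by the dummy START
+			 * queue of the next opdl ring, so skip two
+			 * slots ahead
+			 */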
+ if (queue->q_pos == OPDL_Q_POS_END)
+ port->next_external_qid =
+ device->queue[port->queue_id + 2].external_qid;
+ else
+ port->next_external_qid =
+ device->queue[port->queue_id + 1].external_qid;
+ }
+ }
+ return err;
+}
diff --git a/drivers/event/opdl/opdl_evdev_xstats.c b/drivers/event/opdl/opdl_evdev_xstats.c
new file mode 100644
index 00000000..0e6c6bd5
--- /dev/null
+++ b/drivers/event/opdl/opdl_evdev_xstats.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include "opdl_evdev.h"
+#include "opdl_log.h"
+
+static const char * const port_xstat_str[] = {
+
+ "claim_pkts_requested",
+ "claim_pkts_granted",
+ "claim_non_empty",
+ "claim_empty",
+ "total_cycles",
+};
+
+void
+opdl_xstats_init(struct rte_eventdev *dev)
+{
+ uint32_t i, j;
+
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return;
+
+ for (i = 0; i < device->max_port_nb; i++) {
+ struct opdl_port *port = &device->ports[i];
+
+ for (j = 0; j < max_num_port_xstat; j++) {
+ uint32_t index = (i * max_num_port_xstat) + j;
+
+ /* Name */
+			snprintf(device->port_xstat[index].stat.name,
+				sizeof(device->port_xstat[index].stat.name),
+				"port_%02u_%s",
+				i,
+				port_xstat_str[j]);
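+			/* e.g. port 0, stat 0 becomes
+			 * "port_00_claim_pkts_requested"
+			 */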
+
+ /* ID */
+ device->port_xstat[index].id = index;
+
+ /* Stats ptr */
+ device->port_xstat[index].value = &port->port_stat[j];
+ }
+ }
+}
+
+int
+opdl_xstats_uninit(struct rte_eventdev *dev)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return 0;
+
+ memset(device->port_xstat,
+ 0,
+ sizeof(device->port_xstat));
+
+ return 0;
+}
+
+int
+opdl_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return -ENOTSUP;
+
+ if (mode == RTE_EVENT_DEV_XSTATS_DEVICE ||
+ mode == RTE_EVENT_DEV_XSTATS_QUEUE)
+ return -EINVAL;
+
+ if (queue_port_id >= device->max_port_nb)
+ return -EINVAL;
+
+ if (size < max_num_port_xstat)
+ return max_num_port_xstat;
+
+ uint32_t port_idx = queue_port_id * max_num_port_xstat;
+
+ uint32_t j;
+ for (j = 0; j < max_num_port_xstat; j++) {
+
+ strcpy(xstats_names[j].name,
+ device->port_xstat[j + port_idx].stat.name);
+ ids[j] = device->port_xstat[j + port_idx].id;
+ }
+
+ return max_num_port_xstat;
+}
+
+int
+opdl_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ const unsigned int ids[],
+ uint64_t values[], unsigned int n)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return -ENOTSUP;
+
+ if (mode == RTE_EVENT_DEV_XSTATS_DEVICE ||
+ mode == RTE_EVENT_DEV_XSTATS_QUEUE)
+ return -EINVAL;
+
+ if (queue_port_id >= device->max_port_nb)
+ return -EINVAL;
+
+ if (n > max_num_port_xstat)
+ return -EINVAL;
+
+ uint32_t p_start = queue_port_id * max_num_port_xstat;
+ uint32_t p_finish = p_start + max_num_port_xstat;
+
+ uint32_t i;
+ for (i = 0; i < n; i++) {
+ if (ids[i] < p_start || ids[i] >= p_finish)
+ return -EINVAL;
+
+ values[i] = *(device->port_xstat[ids[i]].value);
+ }
+
+ return n;
+}
+
+uint64_t
+opdl_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return -ENOTSUP;
+
+ uint32_t max_index = device->max_port_nb * max_num_port_xstat;
+
+ uint32_t i;
+ for (i = 0; i < max_index; i++) {
+
+ if (strncmp(name,
+ device->port_xstat[i].stat.name,
+ RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0) {
+ if (id != NULL)
+ *id = i;
+ if (device->port_xstat[i].value)
+ return *(device->port_xstat[i].value);
+ break;
+ }
+ }
+ return -EINVAL;
+}
+
+int
+opdl_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id, const uint32_t ids[],
+ uint32_t nb_ids)
+{
+ struct opdl_evdev *device = opdl_pmd_priv(dev);
+
+ if (!device->do_validation)
+ return -ENOTSUP;
+
+ RTE_SET_USED(mode);
+ RTE_SET_USED(queue_port_id);
+ RTE_SET_USED(ids);
+ RTE_SET_USED(nb_ids);
+
+ return -ENOTSUP;
+}
diff --git a/drivers/event/opdl/opdl_log.h b/drivers/event/opdl/opdl_log.h
new file mode 100644
index 00000000..ae5221c1
--- /dev/null
+++ b/drivers/event/opdl/opdl_log.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _OPDL_LOGS_H_
+#define _OPDL_LOGS_H_
+
+#include <rte_log.h>
+
+extern int opdl_logtype_driver;
+
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, opdl_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _OPDL_LOGS_H_ */
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
new file mode 100644
index 00000000..eca7712b
--- /dev/null
+++ b/drivers/event/opdl/opdl_ring.c
@@ -0,0 +1,1233 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_debug.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal_memconfig.h>
+
+#include "opdl_ring.h"
+#include "opdl_log.h"
+
+#define LIB_NAME "opdl_ring"
+
+#define OPDL_NAME_SIZE 64
+
+
+#define OPDL_EVENT_MASK (0xFFFF0000000FFFFFULL)
+
+int opdl_logtype_driver;
+
+/* Types of dependency between stages */
+enum dep_type {
+ DEP_NONE = 0, /* no dependency */
+ DEP_DIRECT, /* stage has direct dependency */
+ DEP_INDIRECT, /* in-direct dependency through other stage(s) */
+ DEP_SELF, /* stage dependency on itself, used to detect loops */
+};
+
+/* Shared section of stage state.
+ * Care is needed when accessing, and the layout is important, especially to
+ * prevent the adjacent cache-line HW prefetcher from impacting performance.
+ */
+struct shared_state {
+ /* Last known minimum sequence number of dependencies, used for multi
+ * thread operation
+ */
+ uint32_t available_seq;
+ char _pad1[RTE_CACHE_LINE_SIZE * 3];
+ uint32_t head; /* Head sequence number (for multi thread operation) */
+ char _pad2[RTE_CACHE_LINE_SIZE * 3];
+ struct opdl_stage *stage; /* back pointer */
+ uint32_t tail; /* Tail sequence number */
+ char _pad3[RTE_CACHE_LINE_SIZE * 2];
+} __rte_cache_aligned;
+
+/* A structure to keep track of "unfinished" claims. This is only used for
+ * stages that are threadsafe. Each lcore accesses its own instance of this
+ * structure to record the entries it has claimed. This allows one lcore to make
+ * multiple claims without being blocked by another. When disclaiming it moves
+ * forward the shared tail when the shared tail matches the tail value recorded
+ * here.
+ */
+struct claim_manager {
+ uint32_t num_to_disclaim;
+ uint32_t num_claimed;
+ uint32_t mgr_head;
+ uint32_t mgr_tail;
+ struct {
+ uint32_t head;
+ uint32_t tail;
+ } claims[OPDL_DISCLAIMS_PER_LCORE];
+} __rte_cache_aligned;
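+
+/*
+ * Illustrative flow (a sketch with assumed values): if an lcore claims
+ * slots [10,14) and then [14,18), claim_mgr_add() merges them into a
+ * single record {tail = 10, head = 18}; a later disclaim of 8 entries
+ * advances the shared tail from 10 to 18 once shared.tail has reached 10.
+ */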
+
+/* Context for each stage of opdl_ring.
+ * Calculations on sequence numbers need to be done with other uint32_t values
+ * so that results are modulo 2^32, and not undefined.
+ */
+struct opdl_stage {
+ struct opdl_ring *t; /* back pointer, set at init */
+ uint32_t num_slots; /* Number of slots for entries, set at init */
+ uint32_t index; /* ID for this stage, set at init */
+ bool threadsafe; /* Set to 1 if this stage supports threadsafe use */
+	/* Last known min seq number of dependencies, used for single thread
+ * operation
+ */
+ uint32_t available_seq;
+ uint32_t head; /* Current head for single-thread operation */
+ uint32_t shadow_head; /* Shadow head for single-thread operation */
+ uint32_t nb_instance; /* Number of instances */
+ uint32_t instance_id; /* ID of this stage instance */
+ uint16_t num_claimed; /* Number of slots claimed */
+ uint16_t num_event; /* Number of events */
+ uint32_t seq; /* sequence number */
+ uint32_t num_deps; /* Number of direct dependencies */
+ /* Keep track of all dependencies, used during init only */
+ enum dep_type *dep_tracking;
+ /* Direct dependencies of this stage */
+ struct shared_state **deps;
+ /* Other stages read this! */
+ struct shared_state shared __rte_cache_aligned;
+ /* For managing disclaims in multi-threaded processing stages */
+ struct claim_manager pending_disclaims[RTE_MAX_LCORE]
+ __rte_cache_aligned;
+} __rte_cache_aligned;
+
+/* Context for opdl_ring */
+struct opdl_ring {
+ char name[OPDL_NAME_SIZE]; /* OPDL queue instance name */
+ int socket; /* NUMA socket that memory is allocated on */
+ uint32_t num_slots; /* Number of slots for entries */
+ uint32_t mask; /* Mask for sequence numbers (num_slots - 1) */
+ uint32_t slot_size; /* Size of each slot in bytes */
+ uint32_t num_stages; /* Number of stages that have been added */
+ uint32_t max_num_stages; /* Max number of stages */
+ /* Stages indexed by ID */
+ struct opdl_stage *stages;
+ /* Memory for storing slot data */
+ uint8_t slots[0] __rte_cache_aligned;
+};
+
+
+/* Return input stage of a opdl_ring */
+static __rte_always_inline struct opdl_stage *
+input_stage(const struct opdl_ring *t)
+{
+ return &t->stages[0];
+}
+
+/* Check if a stage is the input stage */
+static __rte_always_inline bool
+is_input_stage(const struct opdl_stage *s)
+{
+ return s->index == 0;
+}
+
+/* Get slot pointer from sequence number */
+static __rte_always_inline void *
+get_slot(const struct opdl_ring *t, uint32_t n)
+{
+ return (void *)(uintptr_t)&t->slots[(n & t->mask) * t->slot_size];
+}
+
+/* Find how many entries are available for processing */
+static __rte_always_inline uint32_t
+available(const struct opdl_stage *s)
+{
+ if (s->threadsafe == true) {
+ uint32_t n = __atomic_load_n(&s->shared.available_seq,
+ __ATOMIC_ACQUIRE) -
+ __atomic_load_n(&s->shared.head,
+ __ATOMIC_ACQUIRE);
+
+ /* Return 0 if available_seq needs to be updated */
+ return (n <= s->num_slots) ? n : 0;
+ }
+
+ /* Single threaded */
+ return s->available_seq - s->head;
+}
+
+/* Read sequence number of dependencies and find minimum */
+static __rte_always_inline void
+update_available_seq(struct opdl_stage *s)
+{
+ uint32_t i;
+ uint32_t this_tail = s->shared.tail;
+ uint32_t min_seq = __atomic_load_n(&s->deps[0]->tail, __ATOMIC_ACQUIRE);
+ /* Input stage sequence numbers are greater than the sequence numbers of
+ * its dependencies so an offset of t->num_slots is needed when
+ * calculating available slots and also the condition which is used to
+	 * determine the dependencies minimum sequence number must be reversed.
+ */
+ uint32_t wrap;
+
+ if (is_input_stage(s)) {
+ wrap = s->num_slots;
+ for (i = 1; i < s->num_deps; i++) {
+ uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
+ __ATOMIC_ACQUIRE);
+ if ((this_tail - seq) > (this_tail - min_seq))
+ min_seq = seq;
+ }
+ } else {
+ wrap = 0;
+ for (i = 1; i < s->num_deps; i++) {
+ uint32_t seq = __atomic_load_n(&s->deps[i]->tail,
+ __ATOMIC_ACQUIRE);
+ if ((seq - this_tail) < (min_seq - this_tail))
+ min_seq = seq;
+ }
+ }
+
+ if (s->threadsafe == false)
+ s->available_seq = min_seq + wrap;
+ else
+ __atomic_store_n(&s->shared.available_seq, min_seq + wrap,
+ __ATOMIC_RELEASE);
+}
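+
+/*
+ * Worked example (illustrative): for an input stage on a 256-slot ring
+ * whose slowest dependency has tail 100, available_seq becomes
+ * 100 + 256 = 356, so the input stage may advance its head up to
+ * sequence 356 without overwriting unconsumed slots.
+ */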
+
+/* Wait until the number of available slots reaches number requested */
+static __rte_always_inline void
+wait_for_available(struct opdl_stage *s, uint32_t n)
+{
+ while (available(s) < n) {
+ rte_pause();
+ update_available_seq(s);
+ }
+}
+
+/* Return number of slots to process based on number requested and mode */
+static __rte_always_inline uint32_t
+num_to_process(struct opdl_stage *s, uint32_t n, bool block)
+{
+ /* Don't read tail sequences of dependencies if not needed */
+ if (available(s) >= n)
+ return n;
+
+ update_available_seq(s);
+
+ if (block == false) {
+ uint32_t avail = available(s);
+
+ if (avail == 0) {
+ rte_pause();
+ return 0;
+ }
+ return (avail <= n) ? avail : n;
+ }
+
+ if (unlikely(n > s->num_slots)) {
+ PMD_DRV_LOG(ERR, "%u entries is more than max (%u)",
+ n, s->num_slots);
+ return 0; /* Avoid infinite loop */
+ }
+ /* blocking */
+ wait_for_available(s, n);
+ return n;
+}
+
+/* Copy entries in to slots with wrap-around */
+static __rte_always_inline void
+copy_entries_in(struct opdl_ring *t, uint32_t start, const void *entries,
+ uint32_t num_entries)
+{
+ uint32_t slot_size = t->slot_size;
+ uint32_t slot_index = start & t->mask;
+
+ if (slot_index + num_entries <= t->num_slots) {
+ rte_memcpy(get_slot(t, start), entries,
+ num_entries * slot_size);
+ } else {
+ uint32_t split = t->num_slots - slot_index;
+
+ rte_memcpy(get_slot(t, start), entries, split * slot_size);
+ rte_memcpy(get_slot(t, 0),
+ RTE_PTR_ADD(entries, split * slot_size),
+ (num_entries - split) * slot_size);
+ }
+}
+
+/* Copy entries out from slots with wrap-around */
+static __rte_always_inline void
+copy_entries_out(struct opdl_ring *t, uint32_t start, void *entries,
+ uint32_t num_entries)
+{
+ uint32_t slot_size = t->slot_size;
+ uint32_t slot_index = start & t->mask;
+
+ if (slot_index + num_entries <= t->num_slots) {
+ rte_memcpy(entries, get_slot(t, start),
+ num_entries * slot_size);
+ } else {
+ uint32_t split = t->num_slots - slot_index;
+
+ rte_memcpy(entries, get_slot(t, start), split * slot_size);
+ rte_memcpy(RTE_PTR_ADD(entries, split * slot_size),
+ get_slot(t, 0),
+ (num_entries - split) * slot_size);
+ }
+}
+
+/* Input function optimised for single thread */
+static __rte_always_inline uint32_t
+opdl_ring_input_singlethread(struct opdl_ring *t, const void *entries,
+ uint32_t num_entries, bool block)
+{
+ struct opdl_stage *s = input_stage(t);
+ uint32_t head = s->head;
+
+ num_entries = num_to_process(s, num_entries, block);
+ if (num_entries == 0)
+ return 0;
+
+ copy_entries_in(t, head, entries, num_entries);
+
+ s->head += num_entries;
+ __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+
+ return num_entries;
+}
+
+/* Convert head and tail of claim_manager into valid index */
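+/* (assumes OPDL_DISCLAIMS_PER_LCORE is a power of two; e.g. with a
+ * value of 8, n = 10 would map to slot 2)
+ */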
+static __rte_always_inline uint32_t
+claim_mgr_index(uint32_t n)
+{
+ return n & (OPDL_DISCLAIMS_PER_LCORE - 1);
+}
+
+/* Check if there are available slots in claim_manager */
+static __rte_always_inline bool
+claim_mgr_available(struct claim_manager *mgr)
+{
+	return mgr->mgr_head < (mgr->mgr_tail + OPDL_DISCLAIMS_PER_LCORE);
+}
+
+/* Record a new claim. Only use after first checking an entry is available */
+static __rte_always_inline void
+claim_mgr_add(struct claim_manager *mgr, uint32_t tail, uint32_t head)
+{
+ if ((mgr->mgr_head != mgr->mgr_tail) &&
+ (mgr->claims[claim_mgr_index(mgr->mgr_head - 1)].head ==
+ tail)) {
+ /* Combine with previous claim */
+ mgr->claims[claim_mgr_index(mgr->mgr_head - 1)].head = head;
+ } else {
+ mgr->claims[claim_mgr_index(mgr->mgr_head)].head = head;
+ mgr->claims[claim_mgr_index(mgr->mgr_head)].tail = tail;
+ mgr->mgr_head++;
+ }
+
+ mgr->num_claimed += (head - tail);
+}
+
+/* Read the oldest recorded claim */
+static __rte_always_inline bool
+claim_mgr_read(struct claim_manager *mgr, uint32_t *tail, uint32_t *head)
+{
+ if (mgr->mgr_head == mgr->mgr_tail)
+ return false;
+
+ *head = mgr->claims[claim_mgr_index(mgr->mgr_tail)].head;
+ *tail = mgr->claims[claim_mgr_index(mgr->mgr_tail)].tail;
+ return true;
+}
+
+/* Remove the oldest recorded claim. Only use after first reading the entry */
+static __rte_always_inline void
+claim_mgr_remove(struct claim_manager *mgr)
+{
+ mgr->num_claimed -= (mgr->claims[claim_mgr_index(mgr->mgr_tail)].head -
+ mgr->claims[claim_mgr_index(mgr->mgr_tail)].tail);
+ mgr->mgr_tail++;
+}
+
+/* Update tail in the oldest claim. Only use after first reading the entry */
+static __rte_always_inline void
+claim_mgr_move_tail(struct claim_manager *mgr, uint32_t num_entries)
+{
+ mgr->num_claimed -= num_entries;
+ mgr->claims[claim_mgr_index(mgr->mgr_tail)].tail += num_entries;
+}
+
+static __rte_always_inline void
+opdl_stage_disclaim_multithread_n(struct opdl_stage *s,
+ uint32_t num_entries, bool block)
+{
+ struct claim_manager *disclaims = &s->pending_disclaims[rte_lcore_id()];
+ uint32_t head;
+ uint32_t tail;
+
+ while (num_entries) {
+ bool ret = claim_mgr_read(disclaims, &tail, &head);
+
+ if (ret == false)
+ break; /* nothing is claimed */
+ /* There should be no race condition here. If shared.tail
+ * matches, no other core can update it until this one does.
+ */
+ if (__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) ==
+ tail) {
+ if (num_entries >= (head - tail)) {
+ claim_mgr_remove(disclaims);
+ __atomic_store_n(&s->shared.tail, head,
+ __ATOMIC_RELEASE);
+ num_entries -= (head - tail);
+ } else {
+ claim_mgr_move_tail(disclaims, num_entries);
+ __atomic_store_n(&s->shared.tail,
+ num_entries + tail,
+ __ATOMIC_RELEASE);
+ num_entries = 0;
+ }
+ } else if (block == false)
+ break; /* blocked by other thread */
+ /* Keep going until num_entries are disclaimed. */
+ rte_pause();
+ }
+
+ disclaims->num_to_disclaim = num_entries;
+}
+
+/* Move head atomically, returning number of entries available to process and
+ * the original value of head. For non-input stages, the claim is recorded
+ * so that the tail can be updated later by opdl_stage_disclaim().
+ */
+static __rte_always_inline void
+move_head_atomically(struct opdl_stage *s, uint32_t *num_entries,
+ uint32_t *old_head, bool block, bool claim_func)
+{
+ uint32_t orig_num_entries = *num_entries;
+ uint32_t ret;
+ struct claim_manager *disclaims = &s->pending_disclaims[rte_lcore_id()];
+
+ /* Attempt to disclaim any outstanding claims */
+ opdl_stage_disclaim_multithread_n(s, disclaims->num_to_disclaim,
+ false);
+
+ *old_head = __atomic_load_n(&s->shared.head, __ATOMIC_ACQUIRE);
+ while (true) {
+ bool success;
+ /* If called by opdl_ring_input(), claim does not need to be
+ * recorded, as there will be no disclaim.
+ */
+ if (claim_func) {
+ /* Check that the claim can be recorded */
+ ret = claim_mgr_available(disclaims);
+ if (ret == false) {
+ /* exit out if claim can't be recorded */
+ *num_entries = 0;
+ return;
+ }
+ }
+
+ *num_entries = num_to_process(s, orig_num_entries, block);
+ if (*num_entries == 0)
+ return;
+
+ success = __atomic_compare_exchange_n(&s->shared.head, old_head,
+ *old_head + *num_entries,
+ true, /* may fail spuriously */
+ __ATOMIC_RELEASE, /* memory order on success */
+ __ATOMIC_ACQUIRE); /* memory order on fail */
+ if (likely(success))
+ break;
+ rte_pause();
+ }
+
+ if (claim_func)
+ /* Store the claim record */
+ claim_mgr_add(disclaims, *old_head, *old_head + *num_entries);
+}
+
+/* Input function that supports multiple threads */
+static __rte_always_inline uint32_t
+opdl_ring_input_multithread(struct opdl_ring *t, const void *entries,
+ uint32_t num_entries, bool block)
+{
+ struct opdl_stage *s = input_stage(t);
+ uint32_t old_head;
+
+ move_head_atomically(s, &num_entries, &old_head, block, false);
+ if (num_entries == 0)
+ return 0;
+
+ copy_entries_in(t, old_head, entries, num_entries);
+
+ /* If another thread started inputting before this one, but hasn't
+ * finished, we need to wait for it to complete to update the tail.
+ */
+ while (unlikely(__atomic_load_n(&s->shared.tail, __ATOMIC_ACQUIRE) !=
+ old_head))
+ rte_pause();
+
+ __atomic_store_n(&s->shared.tail, old_head + num_entries,
+ __ATOMIC_RELEASE);
+
+ return num_entries;
+}
+
+static __rte_always_inline uint32_t
+opdl_first_entry_id(uint32_t start_seq, uint8_t nb_p_lcores,
+ uint8_t this_lcore)
+{
+ return ((nb_p_lcores <= 1) ? 0 :
+ (nb_p_lcores - (start_seq % nb_p_lcores) + this_lcore) %
+ nb_p_lcores);
+}
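+
+/*
+ * Worked example (illustrative): with nb_p_lcores = 3 and start_seq = 7,
+ * lcores 0, 1 and 2 start at offsets 2, 0 and 1 respectively, so the
+ * instances stagger across the ring and each takes every third entry.
+ */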
+
+/* Claim slots to process, optimised for single-thread operation */
+static __rte_always_inline uint32_t
+opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block, bool atomic)
+{
+ uint32_t i = 0, j = 0, offset;
+ void *get_slots;
+ struct rte_event *ev;
+ RTE_SET_USED(seq);
+ struct opdl_ring *t = s->t;
+ uint8_t *entries_offset = (uint8_t *)entries;
+
+ if (!atomic) {
+
+ offset = opdl_first_entry_id(s->seq, s->nb_instance,
+ s->instance_id);
+
+ num_entries = s->nb_instance * num_entries;
+
+ num_entries = num_to_process(s, num_entries, block);
+
+ for (; offset < num_entries; offset += s->nb_instance) {
+ get_slots = get_slot(t, s->head + offset);
+ memcpy(entries_offset, get_slots, t->slot_size);
+ entries_offset += t->slot_size;
+ i++;
+ }
+ } else {
+ num_entries = num_to_process(s, num_entries, block);
+
+ for (j = 0; j < num_entries; j++) {
+			ev = (struct rte_event *)get_slot(t, s->head + j);
+			if ((ev->flow_id % s->nb_instance) == s->instance_id) {
+ memcpy(entries_offset, ev, t->slot_size);
+ entries_offset += t->slot_size;
+ i++;
+ }
+ }
+ }
+ s->shadow_head = s->head;
+ s->head += num_entries;
+ s->num_claimed = num_entries;
+ s->num_event = i;
+
+ /* automatically disclaim entries if number of rte_events is zero */
+ if (unlikely(i == 0))
+ opdl_stage_disclaim(s, 0, false);
+
+ return i;
+}
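+
+/*
+ * Note (illustrative): in the atomic path above, with two instances of a
+ * stage, instance 0 copies only events with even flow_id and instance 1
+ * only those with odd flow_id, so a given flow always stays on the same
+ * instance.
+ */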
+
+/* Thread-safe version of function to claim slots for processing */
+static __rte_always_inline uint32_t
+opdl_stage_claim_multithread(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block)
+{
+ uint32_t old_head;
+ struct opdl_ring *t = s->t;
+ uint32_t i = 0, offset;
+ uint8_t *entries_offset = (uint8_t *)entries;
+
+ if (seq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid seq PTR");
+ return 0;
+ }
+ offset = opdl_first_entry_id(*seq, s->nb_instance, s->instance_id);
+ num_entries = offset + (s->nb_instance * num_entries);
+
+ move_head_atomically(s, &num_entries, &old_head, block, true);
+
+ for (; offset < num_entries; offset += s->nb_instance) {
+ memcpy(entries_offset, get_slot(t, s->head + offset),
+ t->slot_size);
+ entries_offset += t->slot_size;
+ i++;
+ }
+
+ *seq = old_head;
+
+ return i;
+}
+
+/* Claim and copy slot pointers, optimised for single-thread operation */
+static __rte_always_inline uint32_t
+opdl_stage_claim_copy_singlethread(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block)
+{
+ num_entries = num_to_process(s, num_entries, block);
+ if (num_entries == 0)
+ return 0;
+ copy_entries_out(s->t, s->head, entries, num_entries);
+ if (seq != NULL)
+ *seq = s->head;
+ s->head += num_entries;
+ return num_entries;
+}
+
+/* Thread-safe version of function to claim and copy pointers to slots */
+static __rte_always_inline uint32_t
+opdl_stage_claim_copy_multithread(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block)
+{
+ uint32_t old_head;
+
+ move_head_atomically(s, &num_entries, &old_head, block, true);
+ if (num_entries == 0)
+ return 0;
+ copy_entries_out(s->t, old_head, entries, num_entries);
+ if (seq != NULL)
+ *seq = old_head;
+ return num_entries;
+}
+
+static __rte_always_inline void
+opdl_stage_disclaim_singlethread_n(struct opdl_stage *s,
+ uint32_t num_entries)
+{
+ uint32_t old_tail = s->shared.tail;
+
+ if (unlikely(num_entries > (s->head - old_tail))) {
+ PMD_DRV_LOG(WARNING, "Attempt to disclaim (%u) more than claimed (%u)",
+ num_entries, s->head - old_tail);
+ num_entries = s->head - old_tail;
+ }
+ __atomic_store_n(&s->shared.tail, num_entries + old_tail,
+ __ATOMIC_RELEASE);
+}
+
+uint32_t
+opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
+ bool block)
+{
+ if (input_stage(t)->threadsafe == false)
+ return opdl_ring_input_singlethread(t, entries, num_entries,
+ block);
+ else
+ return opdl_ring_input_multithread(t, entries, num_entries,
+ block);
+}
+
+uint32_t
+opdl_ring_copy_from_burst(struct opdl_ring *t, struct opdl_stage *s,
+ const void *entries, uint32_t num_entries, bool block)
+{
+ uint32_t head = s->head;
+
+ num_entries = num_to_process(s, num_entries, block);
+
+ if (num_entries == 0)
+ return 0;
+
+ copy_entries_in(t, head, entries, num_entries);
+
+ s->head += num_entries;
+ __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+
+ return num_entries;
+
+}
+
+uint32_t
+opdl_ring_copy_to_burst(struct opdl_ring *t, struct opdl_stage *s,
+ void *entries, uint32_t num_entries, bool block)
+{
+ uint32_t head = s->head;
+
+ num_entries = num_to_process(s, num_entries, block);
+ if (num_entries == 0)
+ return 0;
+
+ copy_entries_out(t, head, entries, num_entries);
+
+ s->head += num_entries;
+ __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+
+ return num_entries;
+}
+
+uint32_t
+opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries)
+{
+ if (available(s) >= num_entries)
+ return num_entries;
+
+ update_available_seq(s);
+
+ uint32_t avail = available(s);
+
+ if (avail == 0) {
+ rte_pause();
+ return 0;
+ }
+ return (avail <= num_entries) ? avail : num_entries;
+}
+
+uint32_t
+opdl_stage_claim(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block, bool atomic)
+{
+ if (s->threadsafe == false)
+ return opdl_stage_claim_singlethread(s, entries, num_entries,
+ seq, block, atomic);
+ else
+ return opdl_stage_claim_multithread(s, entries, num_entries,
+ seq, block);
+}
+
+uint32_t
+opdl_stage_claim_copy(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block)
+{
+ if (s->threadsafe == false)
+ return opdl_stage_claim_copy_singlethread(s, entries,
+ num_entries, seq, block);
+ else
+ return opdl_stage_claim_copy_multithread(s, entries,
+ num_entries, seq, block);
+}
+
+void
+opdl_stage_disclaim_n(struct opdl_stage *s, uint32_t num_entries,
+ bool block)
+{
+
+ if (s->threadsafe == false) {
+ opdl_stage_disclaim_singlethread_n(s, s->num_claimed);
+ } else {
+ struct claim_manager *disclaims =
+ &s->pending_disclaims[rte_lcore_id()];
+
+ if (unlikely(num_entries > s->num_slots)) {
+ PMD_DRV_LOG(WARNING, "Attempt to disclaim (%u) more than claimed (%u)",
+ num_entries, disclaims->num_claimed);
+ num_entries = disclaims->num_claimed;
+ }
+
+ num_entries = RTE_MIN(num_entries + disclaims->num_to_disclaim,
+ disclaims->num_claimed);
+ opdl_stage_disclaim_multithread_n(s, num_entries, block);
+ }
+}
+
+int
+opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries, bool block)
+{
+ if (num_entries != s->num_event) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+ if (s->threadsafe == false) {
+ __atomic_store_n(&s->shared.tail, s->head, __ATOMIC_RELEASE);
+ s->seq += s->num_claimed;
+ s->shadow_head = s->head;
+ s->num_claimed = 0;
+ } else {
+ struct claim_manager *disclaims =
+ &s->pending_disclaims[rte_lcore_id()];
+ opdl_stage_disclaim_multithread_n(s, disclaims->num_claimed,
+ block);
+ }
+ return num_entries;
+}
+
+uint32_t
+opdl_ring_available(struct opdl_ring *t)
+{
+ return opdl_stage_available(&t->stages[0]);
+}
+
+uint32_t
+opdl_stage_available(struct opdl_stage *s)
+{
+ update_available_seq(s);
+ return available(s);
+}
+
+void
+opdl_ring_flush(struct opdl_ring *t)
+{
+ struct opdl_stage *s = input_stage(t);
+
+ wait_for_available(s, s->num_slots);
+}
+
+/******************** Non performance sensitive functions ********************/
+
+/* Initial setup of a new stage's context */
+static int
+init_stage(struct opdl_ring *t, struct opdl_stage *s, bool threadsafe,
+ bool is_input)
+{
+ uint32_t available = (is_input) ? t->num_slots : 0;
+
+ s->t = t;
+ s->num_slots = t->num_slots;
+ s->index = t->num_stages;
+ s->threadsafe = threadsafe;
+ s->shared.stage = s;
+
+ /* Alloc memory for deps */
+ s->dep_tracking = rte_zmalloc_socket(LIB_NAME,
+ t->max_num_stages * sizeof(enum dep_type),
+ 0, t->socket);
+ if (s->dep_tracking == NULL)
+ return -ENOMEM;
+
+ s->deps = rte_zmalloc_socket(LIB_NAME,
+ t->max_num_stages * sizeof(struct shared_state *),
+ 0, t->socket);
+ if (s->deps == NULL) {
+ rte_free(s->dep_tracking);
+ return -ENOMEM;
+ }
+
+ s->dep_tracking[s->index] = DEP_SELF;
+
+ if (threadsafe == true)
+ s->shared.available_seq = available;
+ else
+ s->available_seq = available;
+
+ return 0;
+}
+
+/* Add direct or indirect dependencies between stages */
+static int
+add_dep(struct opdl_stage *dependent, const struct opdl_stage *dependency,
+ enum dep_type type)
+{
+ struct opdl_ring *t = dependent->t;
+ uint32_t i;
+
+ /* Add new direct dependency */
+ if ((type == DEP_DIRECT) &&
+ (dependent->dep_tracking[dependency->index] ==
+ DEP_NONE)) {
+ PMD_DRV_LOG(DEBUG, "%s:%u direct dependency on %u",
+ t->name, dependent->index, dependency->index);
+ dependent->dep_tracking[dependency->index] = DEP_DIRECT;
+ }
+
+ /* Add new indirect dependency or change direct to indirect */
+ if ((type == DEP_INDIRECT) &&
+ ((dependent->dep_tracking[dependency->index] ==
+ DEP_NONE) ||
+ (dependent->dep_tracking[dependency->index] ==
+ DEP_DIRECT))) {
+ PMD_DRV_LOG(DEBUG, "%s:%u indirect dependency on %u",
+ t->name, dependent->index, dependency->index);
+ dependent->dep_tracking[dependency->index] = DEP_INDIRECT;
+ }
+
+ /* Shouldn't happen... */
+ if ((dependent->dep_tracking[dependency->index] == DEP_SELF) &&
+ (dependent != input_stage(t))) {
+ PMD_DRV_LOG(ERR, "Loop in dependency graph %s:%u",
+ t->name, dependent->index);
+ return -EINVAL;
+ }
+
+ /* Keep going to dependencies of the dependency, until input stage */
+ if (dependency != input_stage(t))
+ for (i = 0; i < dependency->num_deps; i++) {
+ int ret = add_dep(dependent, dependency->deps[i]->stage,
+ DEP_INDIRECT);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Make list of sequence numbers for direct dependencies only */
+ if (type == DEP_DIRECT)
+ for (i = 0, dependent->num_deps = 0; i < t->num_stages; i++)
+ if (dependent->dep_tracking[i] == DEP_DIRECT) {
+ if ((i == 0) && (dependent->num_deps > 1))
+ rte_panic("%s:%u depends on more than the input stage",
+ t->name,
+ dependent->index);
+ dependent->deps[dependent->num_deps++] =
+ &t->stages[i].shared;
+ }
+
+ return 0;
+}
+
+struct opdl_ring *
+opdl_ring_create(const char *name, uint32_t num_slots, uint32_t slot_size,
+ uint32_t max_num_stages, int socket)
+{
+ struct opdl_ring *t;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ int mz_flags = 0;
+ struct opdl_stage *st = NULL;
+ const struct rte_memzone *mz = NULL;
+ size_t alloc_size = RTE_CACHE_LINE_ROUNDUP(sizeof(*t) +
+ (num_slots * slot_size));
+
+ /* Compile time checking */
+ RTE_BUILD_BUG_ON((sizeof(struct shared_state) & RTE_CACHE_LINE_MASK) !=
+ 0);
+ RTE_BUILD_BUG_ON((offsetof(struct opdl_stage, shared) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((offsetof(struct opdl_ring, slots) &
+ RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON(!rte_is_power_of_2(OPDL_DISCLAIMS_PER_LCORE));
+
+ /* Parameter checking */
+ if (name == NULL) {
+ PMD_DRV_LOG(ERR, "name param is NULL");
+ return NULL;
+ }
+ if (!rte_is_power_of_2(num_slots)) {
+ PMD_DRV_LOG(ERR, "num_slots (%u) for %s is not power of 2",
+ num_slots, name);
+ return NULL;
+ }
+
+ /* Alloc memory for stages */
+ st = rte_zmalloc_socket(LIB_NAME,
+ max_num_stages * sizeof(struct opdl_stage),
+ RTE_CACHE_LINE_SIZE, socket);
+ if (st == NULL)
+ goto exit_fail;
+
+ snprintf(mz_name, sizeof(mz_name), "%s%s", LIB_NAME, name);
+
+ /* Alloc memory for memzone */
+ mz = rte_memzone_reserve(mz_name, alloc_size, socket, mz_flags);
+ if (mz == NULL)
+ goto exit_fail;
+
+ t = mz->addr;
+
+ /* Initialise opdl_ring queue */
+ memset(t, 0, sizeof(*t));
+ snprintf(t->name, sizeof(t->name), "%s", name);
+ t->socket = socket;
+ t->num_slots = num_slots;
+ t->mask = num_slots - 1;
+ t->slot_size = slot_size;
+ t->max_num_stages = max_num_stages;
+ t->stages = st;
+
+ PMD_DRV_LOG(DEBUG, "Created %s at %p (num_slots=%u,socket=%i,slot_size=%u)",
+ t->name, t, num_slots, socket, slot_size);
+
+ return t;
+
+exit_fail:
+ PMD_DRV_LOG(ERR, "Cannot reserve memory");
+ rte_free(st);
+ rte_memzone_free(mz);
+
+ return NULL;
+}
+
+void *
+opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index)
+{
+ return get_slot(t, index);
+}
+
+bool
+opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
+ uint32_t index, bool atomic)
+{
+ uint32_t i = 0, j = 0, offset;
+ struct opdl_ring *t = s->t;
+ struct rte_event *ev_orig = NULL;
+ bool ev_updated = false;
+ uint64_t ev_temp = 0;
+
+ if (index > s->num_event) {
+ PMD_DRV_LOG(ERR, "index is overflow");
+ return ev_updated;
+ }
+
+ ev_temp = ev->event & OPDL_EVENT_MASK;
+
+ if (!atomic) {
+ offset = opdl_first_entry_id(s->seq, s->nb_instance,
+ s->instance_id);
+ offset += index * s->nb_instance;
+ ev_orig = get_slot(t, s->shadow_head + offset);
+ if ((ev_orig->event & OPDL_EVENT_MASK) != ev_temp) {
+ ev_orig->event = ev->event;
+ ev_updated = true;
+ }
+ if (ev_orig->u64 != ev->u64) {
+ ev_orig->u64 = ev->u64;
+ ev_updated = true;
+ }
+
+ } else {
+ for (i = 0; i < s->num_claimed; i++) {
+ ev_orig = (struct rte_event *)
+ get_slot(t, s->shadow_head + i);
+
+ if ((ev_orig->flow_id % s->nb_instance) ==
+ s->instance_id) {
+
+ if (j == index) {
+ if ((ev_orig->event & OPDL_EVENT_MASK) !=
+ ev_temp) {
+ ev_orig->event = ev->event;
+ ev_updated = true;
+ }
+ if (ev_orig->u64 != ev->u64) {
+ ev_orig->u64 = ev->u64;
+ ev_updated = true;
+ }
+
+ break;
+ }
+ j++;
+ }
+ }
+
+ }
+
+ return ev_updated;
+}
+
+int
+opdl_ring_get_socket(const struct opdl_ring *t)
+{
+ return t->socket;
+}
+
+uint32_t
+opdl_ring_get_num_slots(const struct opdl_ring *t)
+{
+ return t->num_slots;
+}
+
+const char *
+opdl_ring_get_name(const struct opdl_ring *t)
+{
+ return t->name;
+}
+
+/* Check dependency list is valid for a given opdl_ring */
+static int
+check_deps(struct opdl_ring *t, struct opdl_stage *deps[],
+ uint32_t num_deps)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_deps; ++i) {
+ if (!deps[i]) {
+ PMD_DRV_LOG(ERR, "deps[%u] is NULL", i);
+ return -EINVAL;
+ }
+ if (t != deps[i]->t) {
+ PMD_DRV_LOG(ERR, "deps[%u] is in opdl_ring %s, not %s",
+ i, deps[i]->t->name, t->name);
+ return -EINVAL;
+ }
+ }
+ if (num_deps > t->num_stages) {
+ PMD_DRV_LOG(ERR, "num_deps (%u) > number stages (%u)",
+ num_deps, t->num_stages);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+struct opdl_stage *
+opdl_stage_add(struct opdl_ring *t, bool threadsafe, bool is_input)
+{
+ struct opdl_stage *s;
+
+ /* Parameter checking */
+ if (!t) {
+ PMD_DRV_LOG(ERR, "opdl_ring is NULL");
+ return NULL;
+ }
+ if (t->num_stages == t->max_num_stages) {
+ PMD_DRV_LOG(ERR, "%s has max number of stages (%u)",
+ t->name, t->max_num_stages);
+ return NULL;
+ }
+
+ s = &t->stages[t->num_stages];
+
+ if (((uintptr_t)&s->shared & RTE_CACHE_LINE_MASK) != 0)
+ PMD_DRV_LOG(WARNING, "Tail seq num (%p) of %s stage not cache aligned",
+ &s->shared, t->name);
+
+ if (init_stage(t, s, threadsafe, is_input) < 0) {
+ PMD_DRV_LOG(ERR, "Cannot reserve memory");
+ return NULL;
+ }
+ t->num_stages++;
+
+ return s;
+}
+
+uint32_t
+opdl_stage_deps_add(struct opdl_ring *t, struct opdl_stage *s,
+ uint32_t nb_instance, uint32_t instance_id,
+ struct opdl_stage *deps[],
+ uint32_t num_deps)
+{
+ uint32_t i;
+ int ret = 0;
+
+ if ((num_deps > 0) && (!deps)) {
+ PMD_DRV_LOG(ERR, "%s stage has NULL dependencies", t->name);
+ return -1;
+ }
+ ret = check_deps(t, deps, num_deps);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < num_deps; i++) {
+ ret = add_dep(s, deps[i], DEP_DIRECT);
+ if (ret < 0)
+ return ret;
+ }
+
+ s->nb_instance = nb_instance;
+ s->instance_id = instance_id;
+
+ return ret;
+}
+
+struct opdl_stage *
+opdl_ring_get_input_stage(const struct opdl_ring *t)
+{
+ return input_stage(t);
+}
+
+int
+opdl_stage_set_deps(struct opdl_stage *s, struct opdl_stage *deps[],
+ uint32_t num_deps)
+{
+ unsigned int i;
+ int ret;
+
+ if ((num_deps == 0) || (!deps)) {
+ PMD_DRV_LOG(ERR, "cannot set NULL dependencies");
+ return -EINVAL;
+ }
+
+ ret = check_deps(s->t, deps, num_deps);
+ if (ret < 0)
+ return ret;
+
+ /* Update deps */
+ for (i = 0; i < num_deps; i++)
+ s->deps[i] = &deps[i]->shared;
+ s->num_deps = num_deps;
+
+ return 0;
+}
+
+struct opdl_ring *
+opdl_stage_get_opdl_ring(const struct opdl_stage *s)
+{
+ return s->t;
+}
+
+void
+opdl_ring_dump(const struct opdl_ring *t, FILE *f)
+{
+ uint32_t i;
+
+ if (t == NULL) {
+ fprintf(f, "NULL OPDL!\n");
+ return;
+ }
+ fprintf(f, "OPDL \"%s\": num_slots=%u; mask=%#x; slot_size=%u; num_stages=%u; socket=%i\n",
+ t->name, t->num_slots, t->mask, t->slot_size,
+ t->num_stages, t->socket);
+ for (i = 0; i < t->num_stages; i++) {
+ uint32_t j;
+ const struct opdl_stage *s = &t->stages[i];
+
+ fprintf(f, " %s[%u]: threadsafe=%s; head=%u; available_seq=%u; tail=%u; deps=%u",
+ t->name, i, (s->threadsafe) ? "true" : "false",
+ (s->threadsafe) ? s->shared.head : s->head,
+ (s->threadsafe) ? s->shared.available_seq :
+ s->available_seq,
+ s->shared.tail, (s->num_deps > 0) ?
+ s->deps[0]->stage->index : 0);
+ for (j = 1; j < s->num_deps; j++)
+ fprintf(f, ",%u", s->deps[j]->stage->index);
+ fprintf(f, "\n");
+ }
+ fflush(f);
+}
+
+void
+opdl_ring_free(struct opdl_ring *t)
+{
+ uint32_t i;
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+
+ if (t == NULL) {
+ PMD_DRV_LOG(DEBUG, "Freeing NULL OPDL Ring!");
+ return;
+ }
+
+ PMD_DRV_LOG(DEBUG, "Freeing %s opdl_ring at %p", t->name, t);
+
+ for (i = 0; i < t->num_stages; ++i) {
+ rte_free(t->stages[i].deps);
+ rte_free(t->stages[i].dep_tracking);
+ }
+
+ rte_free(t->stages);
+
+ snprintf(mz_name, sizeof(mz_name), "%s%s", LIB_NAME, t->name);
+ mz = rte_memzone_lookup(mz_name);
+ if (rte_memzone_free(mz) != 0)
+ PMD_DRV_LOG(ERR, "Cannot free memzone for %s", t->name);
+}
+
+/* Search for an opdl_ring by its name */
+struct opdl_ring *
+opdl_ring_lookup(const char *name)
+{
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+
+ snprintf(mz_name, sizeof(mz_name), "%s%s", LIB_NAME, name);
+
+ mz = rte_memzone_lookup(mz_name);
+ if (mz == NULL)
+ return NULL;
+
+ return mz->addr;
+}
+
+void
+opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe)
+{
+ s->threadsafe = threadsafe;
+}
diff --git a/drivers/event/opdl/opdl_ring.h b/drivers/event/opdl/opdl_ring.h
new file mode 100644
index 00000000..9e8c33e6
--- /dev/null
+++ b/drivers/event/opdl/opdl_ring.h
@@ -0,0 +1,600 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _OPDL_H_
+#define _OPDL_H_
+
+/**
+ * @file
+ * The "opdl_ring" is a data structure that contains a fixed number of slots,
+ * with each slot having the same, but configurable, size. Entries are input
+ * into the opdl_ring by copying into available slots. Once in the opdl_ring,
+ * an entry is processed by a number of stages, with the ordering of stage
+ * processing controlled by making stages dependent on one or more other stages.
+ * An entry is not available for a stage to process until it has been processed
+ * by that stage's dependencies. Entries are always made available for
+ * processing in the same order that they were input into the opdl_ring.
+ * Inputting is considered a stage that depends on all other stages,
+ * and is also a dependency of all stages.
+ *
+ * Inputting and processing in a stage can support multi-threading. Note that
+ * multi-thread processing can also be done by making stages co-operate, e.g. two
+ * stages where one processes the even packets and the other processes odd
+ * packets.
+ *
+ * An opdl_ring can be used as the basis for pipeline-based applications. Instead
+ * of each stage in a pipeline dequeueing from a ring, processing and enqueueing
+ * to another ring, it can process entries in-place on the ring. If stages do
+ * not depend on each other, they can run in parallel.
+ *
+ * The opdl_ring works with entries of configurable size; these could be
+ * pointers to mbufs, pointers to mbufs with application-specific meta-data,
+ * tasks etc.
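+ *
+ * Illustration only (not part of the API itself): a minimal two-stage
+ * pipeline might be assembled as follows, with error handling omitted and
+ * the actual processing left abstract:
+ *
+ *   struct opdl_ring *r = opdl_ring_create("pipe", 1024,
+ *           sizeof(void *), 2, SOCKET_ID_ANY);
+ *   struct opdl_stage *input = opdl_stage_add(r, false, true);
+ *   struct opdl_stage *worker = opdl_stage_add(r, false, false);
+ *
+ *   opdl_stage_set_deps(worker, &input, 1);
+ *   // entries can now be fed in with opdl_ring_input() and processed in
+ *   // the worker via opdl_stage_claim()/opdl_stage_disclaim()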
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <rte_eventdev.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef OPDL_DISCLAIMS_PER_LCORE
+/** Multi-threaded processing allows one thread to process multiple batches in a
+ * stage, while another thread is processing a single large batch. This number
+ * controls how many non-contiguous batches one stage can process before being
+ * blocked by the other stage.
+ */
+#define OPDL_DISCLAIMS_PER_LCORE 8
+#endif
+
+/** Opaque handle to an opdl_ring instance */
+struct opdl_ring;
+
+/** Opaque handle to a single stage in an opdl_ring */
+struct opdl_stage;
+
+/**
+ * Create a new instance of an opdl_ring.
+ *
+ * @param name
+ * String containing the name to give the new opdl_ring instance.
+ * @param num_slots
+ * How many slots the opdl_ring contains. Must be a power of 2!
+ * @param slot_size
+ * How many bytes in each slot.
+ * @param max_num_stages
+ * Maximum number of stages.
+ * @param socket
+ * The NUMA socket (or SOCKET_ID_ANY) to allocate the memory used for this
+ * opdl_ring instance.
+ *
+ * @return
+ * A pointer to a new opdl_ring instance, or NULL on error.
+ */
+struct opdl_ring *
+opdl_ring_create(const char *name, uint32_t num_slots, uint32_t slot_size,
+ uint32_t max_num_stages, int socket);
+
+/**
+ * Get pointer to an individual slot in an opdl_ring.
+ *
+ * @param t
+ * The opdl_ring.
+ * @param index
+ * Index of slot. If greater than the number of slots, it will be masked to be
+ * within the correct range.
+ *
+ * @return
+ * A pointer to that slot.
+ */
+void *
+opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index);
+
+/**
+ * Get NUMA socket used by an opdl_ring.
+ *
+ * @param t
+ * The opdl_ring.
+ *
+ * @return
+ * NUMA socket.
+ */
+int
+opdl_ring_get_socket(const struct opdl_ring *t);
+
+/**
+ * Get number of slots in an opdl_ring.
+ *
+ * @param t
+ * The opdl_ring.
+ *
+ * @return
+ * Number of slots.
+ */
+uint32_t
+opdl_ring_get_num_slots(const struct opdl_ring *t);
+
+/**
+ * Get name of an opdl_ring.
+ *
+ * @param t
+ * The opdl_ring.
+ *
+ * @return
+ * Name string.
+ */
+const char *
+opdl_ring_get_name(const struct opdl_ring *t);
+
+/**
+ * Adds a new processing stage to a specified opdl_ring instance. Adding a stage
+ * while there are entries in the opdl_ring being processed will cause undefined
+ * behaviour.
+ *
+ * @param t
+ * The opdl_ring to add the stage to.
+ * @param threadsafe
+ * Whether to support multiple threads processing this stage or not.
+ * Enabling this may have a negative impact on performance if only one thread
+ * will be processing this stage.
+ * @param is_input
+ * Indication to initialise the stage with all slots available or none.
+ *
+ * @return
+ * A pointer to the new stage, or NULL on error.
+ */
+struct opdl_stage *
+opdl_stage_add(struct opdl_ring *t, bool threadsafe, bool is_input);
+
+/**
+ * Returns the input stage of an opdl_ring to be used by other API functions.
+ *
+ * @param t
+ * The opdl_ring.
+ *
+ * @return
+ * A pointer to the input stage.
+ */
+struct opdl_stage *
+opdl_ring_get_input_stage(const struct opdl_ring *t);
+
+/**
+ * Sets the dependencies for a stage (clears all the previous deps!). Changing
+ * dependencies while there are entries in the opdl_ring being processed will
+ * cause undefined behaviour.
+ *
+ * @param s
+ * The stage to set the dependencies for.
+ * @param deps
+ * An array of pointers to other stages that this stage will depend on. The
+ * other stages must be part of the same opdl_ring!
+ * @param num_deps
+ * The size of the deps array. This must be > 0.
+ *
+ * @return
+ * 0 on success, a negative value on error.
+ */
+int
+opdl_stage_set_deps(struct opdl_stage *s, struct opdl_stage *deps[],
+ uint32_t num_deps);
+
+/**
+ * Returns the opdl_ring that a stage belongs to.
+ *
+ * @param s
+ * The stage
+ *
+ * @return
+ * A pointer to the opdl_ring that the stage belongs to.
+ */
+struct opdl_ring *
+opdl_stage_get_opdl_ring(const struct opdl_stage *s);
+
+/**
+ * Inputs a new batch of entries into the opdl_ring. This function is only
+ * threadsafe (with the same opdl_ring parameter) if the input stage of the
+ * opdl_ring is threadsafe. For performance reasons, this function does not
+ * check input parameters.
+ *
+ * @param t
+ * The opdl_ring to input entries in to.
+ * @param entries
+ * An array of entries that will be copied in to the opdl_ring.
+ * @param num_entries
+ * The size of the entries array.
+ * @param block
+ * If this is true, the function blocks until enough slots are available to
+ * input all the requested entries. If false, then the function inputs as
+ * many entries as currently possible.
+ *
+ * @return
+ * The number of entries successfully input.
+ */
+uint32_t
+opdl_ring_input(struct opdl_ring *t, const void *entries, uint32_t num_entries,
+ bool block);
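+
+/*
+ * Illustrative only (r and entries[] are assumptions for this sketch, not
+ * part of the API): input up to 32 entries without blocking, with the
+ * caller responsible for retrying whatever was not accepted:
+ *
+ *   uint32_t n = opdl_ring_input(r, entries, 32, false);
+ */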
+
+/**
+ * Inputs a new batch of entries into an opdl stage. This function is only
+ * threadsafe (with the same opdl_ring parameter) if the stage was created as
+ * threadsafe. For performance reasons, this function does not check input
+ * parameters.
+ *
+ * @param t
+ * The opdl ring to input entries in to.
+ * @param s
+ * The stage to copy entries to.
+ * @param entries
+ * An array of entries that will be copied in to the opdl ring.
+ * @param num_entries
+ * The size of the entries array.
+ * @param block
+ * If this is true, the function blocks until enough slots are available to
+ * input all the requested entries. If false, then the function inputs as
+ * many entries as currently possible.
+ *
+ * @return
+ * The number of entries successfully input.
+ */
+uint32_t
+opdl_ring_copy_from_burst(struct opdl_ring *t, struct opdl_stage *s,
+ const void *entries, uint32_t num_entries, bool block);
+
+/**
+ * Copy a batch of entries from the opdl ring. This function is only
+ * threadsafe (with the same opdl_ring parameter) if the stage was created as
+ * threadsafe. For performance reasons, this function does not check input
+ * parameters.
+ *
+ * @param t
+ * The opdl ring to copy entries from.
+ * @param s
+ * The stage to copy entries from.
+ * @param entries
+ * An array of entries that will be copied from the opdl ring.
+ * @param num_entries
+ * The size of the entries array.
+ * @param block
+ * If this is true, the function blocks until enough entries are available to
+ * copy all the requested entries. If false, then the function copies as many
+ * entries as currently possible.
+ *
+ * @return
+ * The number of entries successfully copied.
+ */
+uint32_t
+opdl_ring_copy_to_burst(struct opdl_ring *t, struct opdl_stage *s,
+ void *entries, uint32_t num_entries, bool block);
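+
+/*
+ * Illustrative only: a copy-in/copy-out pattern pairing the two burst copy
+ * functions above. It assumes the ring r was created with slot_size equal to
+ * sizeof(struct rte_event) and that "last" is the final stage; both names
+ * are assumptions for this sketch:
+ *
+ *   struct rte_event events[32];
+ *   struct opdl_stage *in = opdl_ring_get_input_stage(r);
+ *
+ *   // producer: copy up to 32 new entries into the ring
+ *   uint32_t n_in = opdl_ring_copy_from_burst(r, in, events, 32, false);
+ *
+ *   // consumer: copy up to 32 processed entries out at the last stage
+ *   uint32_t n_out = opdl_ring_copy_to_burst(r, last, events, 32, false);
+ */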
+
+/**
+ * Before processing a batch of entries, a stage must first claim them to get
+ * access. This function is threadsafe (using the same opdl_stage parameter) if
+ * the stage was created with threadsafe set to true; otherwise it is only
+ * threadsafe with a different opdl_stage per thread. For performance
+ * reasons, this function does not check input parameters.
+ *
+ * @param s
+ * The opdl_ring stage to read entries in.
+ * @param entries
+ * An array of pointers to entries that will be filled in by this function.
+ * @param num_entries
+ * The number of entries to attempt to claim for processing (and the size of
+ * the entries array).
+ * @param seq
+ * If not NULL, this is set to the value of the internal stage sequence number
+ * associated with the first entry returned.
+ * @param block
+ * If this is true, the function blocks until num_entries slots are available
+ * to process. If false, then the function claims as many entries as
+ * currently possible.
+ *
+ * @param atomic
+ * If this is true, the function returns events according to their flow ID
+ * (atomic queue semantics); only events whose flow maps to this stage
+ * instance are returned.
+ * @return
+ * The number of pointers to entries filled in to the entries array.
+ */
+uint32_t
+opdl_stage_claim(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block, bool atomic);
+
+uint32_t
+opdl_stage_deps_add(struct opdl_ring *t, struct opdl_stage *s,
+ uint32_t nb_instance, uint32_t instance_id,
+ struct opdl_stage *deps[], uint32_t num_deps);
+
+/**
+ * A function to check how many entries are ready to be claimed.
+ *
+ * @param entries
+ * An array of pointers to entries.
+ * @param num_entries
+ * Number of entries in an array.
+ * @param arg
+ * An opaque pointer to data passed to the claim function.
+ * @param block
+ * When set to true, the function should wait until num_entries are ready to
+ * be processed. Otherwise it should return immediately.
+ *
+ * @return
+ * Number of entries ready to be claimed.
+ */
+typedef uint32_t (opdl_ring_check_entries_t)(void *entries[],
+ uint32_t num_entries, void *arg, bool block);
+
+/**
+ * Before processing a batch of entries, a stage must first claim them to get
+ * access. Each entry is checked by the passed check() function and depending
+ * on block value, it waits until num_entries are ready or returns immediately.
+ * This function is only threadsafe with a different opdl_stage per thread.
+ *
+ * @param s
+ * The opdl_ring stage to read entries in.
+ * @param entries
+ * An array of pointers to entries that will be filled in by this function.
+ * @param num_entries
+ * The number of entries to attempt to claim for processing (and the size of
+ * the entries array).
+ * @param seq
+ * If not NULL, this is set to the value of the internal stage sequence number
+ * associated with the first entry returned.
+ * @param block
+ * If this is true, the function blocks until num_entries ready slots are
+ * available to process. If false, then the function claims as many ready
+ * entries as currently possible.
+ * @param check
+ * Pointer to a function called to check entries.
+ * @param arg
+ * Opaque data passed to check() function.
+ *
+ * @return
+ * The number of pointers to ready entries filled in to the entries array.
+ */
+uint32_t
+opdl_stage_claim_check(struct opdl_stage *s, void **entries,
+ uint32_t num_entries, uint32_t *seq, bool block,
+ opdl_ring_check_entries_t *check, void *arg);
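+
+/*
+ * Illustrative only: one possible shape of the check() callback used by
+ * opdl_stage_claim_check(). struct my_task and its ready flag are
+ * assumptions for this sketch, not part of the API:
+ *
+ *   static uint32_t
+ *   ready_check(void *entries[], uint32_t num_entries, void *arg, bool block)
+ *   {
+ *           uint32_t i;
+ *
+ *           RTE_SET_USED(arg);
+ *           RTE_SET_USED(block);
+ *           // count the leading entries that are ready to be claimed
+ *           for (i = 0; i < num_entries; i++)
+ *                   if (!((struct my_task *)entries[i])->ready)
+ *                           break;
+ *           return i;
+ *   }
+ */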
+
+/**
+ * Before processing a batch of entries, a stage must first claim them to get
+ * access. This function is threadsafe (using the same opdl_stage parameter) if
+ * the stage was created with threadsafe set to true; otherwise it is only
+ * threadsafe with a different opdl_stage per thread.
+ *
+ * The difference between this function and opdl_stage_claim() is that this
+ * function copies the entries from the opdl_ring. Note that any changes made to
+ * the copied entries will not be reflected back into the entries in the
+ * opdl_ring, so this function probably only makes sense if the entries are
+ * pointers to other data. For performance reasons, this function does not check
+ * input parameters.
+ *
+ * @param s
+ * The opdl_ring stage to read entries in.
+ * @param entries
+ * An array of entries that will be filled in by this function.
+ * @param num_entries
+ * The number of entries to attempt to claim for processing (and the size of
+ * the entries array).
+ * @param seq
+ * If not NULL, this is set to the value of the internal stage sequence number
+ * associated with the first entry returned.
+ * @param block
+ * If this is true, the function blocks until num_entries slots are available
+ * to process. If false, then the function claims as many entries as
+ * currently possible.
+ *
+ * @return
+ * The number of entries copied in to the entries array.
+ */
+uint32_t
+opdl_stage_claim_copy(struct opdl_stage *s, void *entries,
+ uint32_t num_entries, uint32_t *seq, bool block);
+
+/**
+ * This function must be called when a stage has finished its processing of
+ * entries, to make them available to any dependent stages. All entries that are
+ * claimed by the calling thread in the stage will be disclaimed. It is possible
+ * to claim multiple batches before disclaiming. For performance reasons, this
+ * function does not check input parameters.
+ *
+ * @param s
+ * The opdl_ring stage in which to disclaim all claimed entries.
+ *
+ * @param num_entries
+ * The number of entries being disclaimed. This must equal the number of
+ * entries currently claimed by the calling thread, otherwise rte_errno is
+ * set to EINVAL and nothing is disclaimed.
+ *
+ * @param block
+ * Entries are always made available to a stage in the same order that they
+ * were input in the stage. If a stage is multithread safe, this may mean that
+ * full disclaiming of a batch of entries can not be considered complete until
+ * all earlier threads in the stage have disclaimed. If this parameter is true
+ * then the function blocks until all entries are fully disclaimed, otherwise
+ * it disclaims as many as currently possible, with non fully disclaimed
+ * batches stored until the next call to a claim or disclaim function for this
+ * stage on this thread.
+ *
+ * If a thread is not going to process any more entries in this stage, it
+ * *must* first call this function with this parameter set to true to ensure
+ * it does not block the entire opdl_ring.
+ *
+ * In a single threaded stage, this parameter has no effect.
+ *
+ * @return
+ * On success, the number of entries disclaimed. If num_entries does not
+ * match the number of entries currently claimed, 0 is returned and
+ * rte_errno is set to EINVAL.
+ */
+int
+opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries,
+ bool block);
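+
+/*
+ * Illustrative only: a worker loop pairing opdl_stage_claim() with
+ * opdl_stage_disclaim(). handle_entry() is a hypothetical application
+ * function and the batch size of 32 is an arbitrary choice:
+ *
+ *   void *batch[32];
+ *   uint32_t i, n;
+ *
+ *   n = opdl_stage_claim(s, batch, 32, NULL, false, false);
+ *   for (i = 0; i < n; i++)
+ *           handle_entry(batch[i]);
+ *   if (n > 0)
+ *           opdl_stage_disclaim(s, n, true);
+ */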
+
+/**
+ * This function can be called when a stage has finished its processing of
+ * entries, to make them available to any dependent stages. The difference
+ * between this function and opdl_stage_disclaim() is that here only a
+ * portion of entries are disclaimed, not all of them. For performance reasons,
+ * this function does not check input parameters.
+ *
+ * @param s
+ * The opdl_ring stage in which to disclaim entries.
+ *
+ * @param num_entries
+ * The number of entries to disclaim.
+ *
+ * @param block
+ * Entries are always made available to a stage in the same order that they
+ * were input in the stage. If a stage is multithread safe, this may mean that
+ * full disclaiming of a batch of entries can not be considered complete until
+ * all earlier threads in the stage have disclaimed. If this parameter is true
+ * then the function blocks until the specified number of entries has been
+ * disclaimed (or there are no more entries to disclaim). Otherwise it
+ * disclaims as many claims as currently possible and an attempt to disclaim
+ * them is made the next time a claim or disclaim function for this stage on
+ * this thread is called.
+ *
+ * In a single threaded stage, this parameter has no effect.
+ */
+void
+opdl_stage_disclaim_n(struct opdl_stage *s, uint32_t num_entries,
+ bool block);
+
+/**
+ * Check how many entries can be input.
+ *
+ * @param t
+ * The opdl_ring instance to check.
+ *
+ * @return
+ * The number of new entries currently allowed to be input.
+ */
+uint32_t
+opdl_ring_available(struct opdl_ring *t);
+
+/**
+ * Check how many entries can be processed in a stage.
+ *
+ * @param s
+ * The stage to check.
+ *
+ * @return
+ * The number of entries currently available to be processed in this stage.
+ */
+uint32_t
+opdl_stage_available(struct opdl_stage *s);
+
+/**
+ * Check how many entries are available to be processed.
+ *
+ * NOTE : DOES NOT CHANGE ANY STATE WITHIN THE STAGE
+ *
+ * @param s
+ * The stage to check.
+ *
+ * @param num_entries
+ * The number of entries to check for availability.
+ *
+ * @return
+ * The number of entries currently available to be processed in this stage.
+ */
+uint32_t
+opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries);
+
+/**
+ * Create an empty stage instance and return a pointer to it.
+ *
+ * @param t
+ * The opdl_ring to create the stage in.
+ *
+ * @param threadsafe
+ * Whether to enable multi-threaded processing for this stage or not.
+ * @return
+ * A pointer to the new (empty) stage instance.
+ */
+struct opdl_stage *
+opdl_stage_create(struct opdl_ring *t, bool threadsafe);
+
+/**
+ * Prints information on an opdl_ring instance and all its stages.
+ *
+ * @param t
+ * The opdl_ring to print info on.
+ * @param f
+ * Where to print the info.
+ */
+void
+opdl_ring_dump(const struct opdl_ring *t, FILE *f);
+
+/**
+ * Blocks until all entries in an opdl_ring have been processed by all stages.
+ *
+ * @param t
+ * The opdl_ring instance to flush.
+ */
+void
+opdl_ring_flush(struct opdl_ring *t);
+
+/**
+ * Deallocates all resources used by an opdl_ring instance.
+ *
+ * @param t
+ * The opdl_ring instance to free.
+ */
+void
+opdl_ring_free(struct opdl_ring *t);
+
+/**
+ * Search for an opdl_ring by its name.
+ *
+ * @param name
+ * The name of the opdl_ring.
+ * @return
+ * The pointer to the opdl_ring matching the name, or NULL if not found.
+ *
+ */
+struct opdl_ring *
+opdl_ring_lookup(const char *name);
+
+/**
+ * Set the threadsafe attribute of an opdl_stage.
+ *
+ * @param s
+ * The opdl_stage.
+ * @param threadsafe
+ * Threadsafe value.
+ */
+void
+opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
+
+
+/**
+ * Compare the event descriptor with the original version in the ring. If any
+ * key field of the event descriptor has been changed by the application, then
+ * update the slot in the ring, otherwise do nothing with it. The key fields
+ * are flow_id, priority, mbuf and impl_opaque.
+ *
+ * @param s
+ * The opdl_stage.
+ * @param ev
+ * Pointer to the event descriptor.
+ * @param index
+ * Index of the event descriptor.
+ * @param atomic
+ * Queue type associated with the stage.
+ * @return
+ * true if any key field of the event was changed compared with the previous
+ * record, false otherwise.
+ */
+
+bool
+opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
+ uint32_t index, bool atomic);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _OPDL_H_ */
diff --git a/drivers/event/opdl/opdl_test.c b/drivers/event/opdl/opdl_test.c
new file mode 100644
index 00000000..5868ec1b
--- /dev/null
+++ b/drivers/event/opdl/opdl_test.c
@@ -0,0 +1,1057 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_eventdev.h>
+#include <rte_bus_vdev.h>
+#include <rte_pause.h>
+
+#include "opdl_evdev.h"
+#include "opdl_log.h"
+
+
+#define MAX_PORTS 16
+#define MAX_QIDS 16
+#define NUM_PACKETS (1<<18)
+#define NUM_EVENTS 256
+#define BURST_SIZE 32
+
+
+
+static int evdev;
+
+struct test {
+ struct rte_mempool *mbuf_pool;
+ uint8_t port[MAX_PORTS];
+ uint8_t qid[MAX_QIDS];
+ int nb_qids;
+};
+
+static struct rte_mempool *eventdev_func_mempool;
+
+static __rte_always_inline struct rte_mbuf *
+rte_gen_arp(int portid, struct rte_mempool *mp)
+{
+ /*
+ * len = 14 + 46
+ * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
+ */
+ static const uint8_t arp_request[] = {
+ /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
+ 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
+ /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
+ 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
+ /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+ };
+ struct rte_mbuf *m;
+ int pkt_len = sizeof(arp_request) - 1;
+
+ m = rte_pktmbuf_alloc(mp);
+ if (!m)
+ return 0;
+
+ memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
+ arp_request, pkt_len);
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ rte_pktmbuf_data_len(m) = pkt_len;
+
+ RTE_SET_USED(portid);
+
+ return m;
+}
+
+/* initialization and config */
+static __rte_always_inline int
+init(struct test *t, int nb_queues, int nb_ports)
+{
+ struct rte_event_dev_config config = {
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_event_queue_flows = 1024,
+ .nb_events_limit = 4096,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+ int ret;
+
+ void *temp = t->mbuf_pool; /* save and restore mbuf pool */
+
+ memset(t, 0, sizeof(*t));
+ t->mbuf_pool = temp;
+
+ ret = rte_event_dev_configure(evdev, &config);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "%d: Error configuring device\n", __LINE__);
+ return ret;
+}
+
+static __rte_always_inline int
+create_ports(struct test *t, int num_ports)
+{
+ int i;
+ static const struct rte_event_port_conf conf = {
+ .new_event_threshold = 1024,
+ .dequeue_depth = 32,
+ .enqueue_depth = 32,
+ };
+ if (num_ports > MAX_PORTS)
+ return -1;
+
+ for (i = 0; i < num_ports; i++) {
+ if (rte_event_port_setup(evdev, i, &conf) < 0) {
+ PMD_DRV_LOG(ERR, "Error setting up port %d\n", i);
+ return -1;
+ }
+ t->port[i] = i;
+ }
+
+ return 0;
+}
+
+static __rte_always_inline int
+create_queues_type(struct test *t, int num_qids, enum queue_type flags)
+{
+ int i;
+ uint8_t type;
+
+ switch (flags) {
+ case OPDL_Q_TYPE_ORDERED:
+ type = RTE_SCHED_TYPE_ORDERED;
+ break;
+ case OPDL_Q_TYPE_ATOMIC:
+ type = RTE_SCHED_TYPE_ATOMIC;
+ break;
+ default:
+ type = 0;
+ }
+
+ /* Q creation */
+ const struct rte_event_queue_conf conf = {
+ .event_queue_cfg =
+ (flags == OPDL_Q_TYPE_SINGLE_LINK ?
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK : 0),
+ .schedule_type = type,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ for (i = t->nb_qids ; i < t->nb_qids + num_qids; i++) {
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ PMD_DRV_LOG(ERR, "%d: error creating qid %d\n ",
+ __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+ }
+
+ t->nb_qids += num_qids;
+
+ if (t->nb_qids > MAX_QIDS)
+ return -1;
+
+ return 0;
+}
+
+
+/* destruction */
+static __rte_always_inline int
+cleanup(struct test *t __rte_unused)
+{
+ rte_event_dev_stop(evdev);
+ rte_event_dev_close(evdev);
+ PMD_DRV_LOG(ERR, "clean up for test done\n");
+ return 0;
+}
+
+static int
+ordered_basic(struct test *t)
+{
+ const uint8_t rx_port = 0;
+ const uint8_t w1_port = 1;
+ const uint8_t w3_port = 3;
+ const uint8_t tx_port = 4;
+ int err;
+ uint32_t i;
+ uint32_t deq_pkts;
+ struct rte_mbuf *mbufs[3];
+
+ const uint32_t MAGIC_SEQN = 1234;
+
+ /* Create instance with 5 ports */
+ if (init(t, 2, tx_port+1) < 0 ||
+ create_ports(t, tx_port+1) < 0 ||
+ create_queues_type(t, 2, OPDL_Q_TYPE_ORDERED)) {
+ PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * CQ mapping to QID
+ * We need three ports, all mapped to the same ordered qid0. Then we'll
+ * take a packet out to each port, re-enqueue in reverse order,
+ * then make sure the reordering has taken place properly when we
+ * dequeue from the tx_port.
+ *
+ * Simplified test setup diagram:
+ *
+ * rx_port w1_port
+ * \ / \
+ * qid0 - w2_port - qid1
+ * \ / \
+ * w3_port tx_port
+ */
+ /* CQ mapping to QID for LB ports (directed mapped on create) */
+ for (i = w1_port; i <= w3_port; i++) {
+ err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
+ __LINE__);
+ cleanup(t);
+ return -1;
+ }
+ }
+
+ err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+ /* Enqueue 3 packets to the rx port */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+ if (!mbufs[i]) {
+ PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = mbufs[i];
+ mbufs[i]->seqn = MAGIC_SEQN + i;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
+ __LINE__, i, err);
+ return -1;
+ }
+ }
+
+ /* use extra slot to make logic in loops easier */
+ struct rte_event deq_ev[w3_port + 1];
+
+ uint32_t seq = 0;
+
+ /* Dequeue the 3 packets, one from each worker port */
+ for (i = w1_port; i <= w3_port; i++) {
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
+ &deq_ev[i], 1, 0);
+ if (deq_pkts != 1) {
+ PMD_DRV_LOG(ERR, "%d: Failed to deq\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ seq = deq_ev[i].mbuf->seqn - MAGIC_SEQN;
+
+ if (seq != (i-1)) {
+ PMD_DRV_LOG(ERR, " seq test failed ! eq is %d , "
+ "port number is %u\n", seq, i);
+ return -1;
+ }
+ }
+
+ /* Enqueue each packet in reverse order, flushing after each one */
+ for (i = w3_port; i >= w1_port; i--) {
+
+ deq_ev[i].op = RTE_EVENT_OP_FORWARD;
+ deq_ev[i].queue_id = t->qid[1];
+ err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ /* dequeue from the tx ports, we should get 3 packets */
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
+ 3, 0);
+
+ /* Check to see if we've got all 3 packets */
+ if (deq_pkts != 3) {
+ PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n",
+ __LINE__, deq_pkts, tx_port);
+ rte_event_dev_dump(evdev, stdout);
+ return 1;
+ }
+
+ /* Destroy the instance */
+ cleanup(t);
+
+ return 0;
+}
+
+
+static int
+atomic_basic(struct test *t)
+{
+ const uint8_t rx_port = 0;
+ const uint8_t w1_port = 1;
+ const uint8_t w3_port = 3;
+ const uint8_t tx_port = 4;
+ int err;
+ int i;
+ uint32_t deq_pkts;
+ struct rte_mbuf *mbufs[3];
+ const uint32_t MAGIC_SEQN = 1234;
+
+ /* Create instance with 5 ports */
+ if (init(t, 2, tx_port+1) < 0 ||
+ create_ports(t, tx_port+1) < 0 ||
+ create_queues_type(t, 2, OPDL_Q_TYPE_ATOMIC)) {
+ PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+
+ /*
+ * CQ mapping to QID
+ * We need three ports, all mapped to the same atomic qid0. As all packets
+ * carry the same flow ID, they should all be scheduled to the same worker
+ * port, which then forwards them to qid1 to be dequeued from the tx_port.
+ *
+ * Simplified test setup diagram:
+ *
+ * rx_port w1_port
+ * \ / \
+ * qid0 - w2_port - qid1
+ * \ / \
+ * w3_port tx_port
+ */
+ /* CQ mapping to QID for Atomic ports (directed mapped on create) */
+ for (i = w1_port; i <= w3_port; i++) {
+ err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
+ __LINE__);
+ cleanup(t);
+ return -1;
+ }
+ }
+
+ err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /* Enqueue 3 packets to the rx port */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+ if (!mbufs[i]) {
+ PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.flow_id = 1;
+ ev.mbuf = mbufs[i];
+ mbufs[i]->seqn = MAGIC_SEQN + i;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
+ __LINE__, i, err);
+ return -1;
+ }
+ }
+
+ /* use extra slot to make logic in loops easier */
+ struct rte_event deq_ev[w3_port + 1];
+
+ /* Dequeue the 3 packets, one from each worker port */
+ for (i = w1_port; i <= w3_port; i++) {
+
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
+ deq_ev, 3, 0);
+
+ if (t->port[i] != 2) {
+ if (deq_pkts != 0) {
+ PMD_DRV_LOG(ERR, "%d: deq none zero !\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ } else {
+
+ if (deq_pkts != 3) {
+ PMD_DRV_LOG(ERR, "%d: deq not eqal to 3 %u !\n",
+ __LINE__, deq_pkts);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ int j;
+ for (j = 0; j < 3; j++) {
+ deq_ev[j].op = RTE_EVENT_OP_FORWARD;
+ deq_ev[j].queue_id = t->qid[1];
+ }
+
+ err = rte_event_enqueue_burst(evdev, t->port[i],
+ deq_ev, 3);
+
+ if (err != 3) {
+ PMD_DRV_LOG(ERR, "port %d: Failed to enqueue pkt %u, "
+ "retval = %u\n",
+ t->port[i], 3, err);
+ return -1;
+ }
+
+ }
+
+ }
+
+
+ /* dequeue from the tx ports, we should get 3 packets */
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
+ 3, 0);
+
+ /* Check to see if we've got all 3 packets */
+ if (deq_pkts != 3) {
+ PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n",
+ __LINE__, deq_pkts, tx_port);
+ rte_event_dev_dump(evdev, stdout);
+ return 1;
+ }
+
+ cleanup(t);
+
+ return 0;
+}
+
+static __rte_always_inline int
+check_qid_stats(uint32_t id[], int index)
+{
+ if (index == 0) {
+ if (id[0] != 3 || id[1] != 3
+ || id[2] != 3)
+ return -1;
+ } else if (index == 1) {
+ if (id[0] != 5 || id[1] != 5
+ || id[2] != 2)
+ return -1;
+ } else if (index == 2) {
+ if (id[0] != 3 || id[1] != 1
+ || id[2] != 1)
+ return -1;
+ }
+
+ return 0;
+}
+
+
+static int
+check_statistics(void)
+{
+ int num_ports = 3; /* Hard-coded for this app */
+ int i;
+
+ for (i = 0; i < num_ports; i++) {
+ int num_stats, num_stats_returned;
+
+ num_stats = rte_event_dev_xstats_names_get(0,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ i,
+ NULL,
+ NULL,
+ 0);
+ if (num_stats > 0) {
+
+ uint32_t id[num_stats];
+ struct rte_event_dev_xstats_name names[num_stats];
+ uint64_t values[num_stats];
+
+ num_stats_returned = rte_event_dev_xstats_names_get(0,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ i,
+ names,
+ id,
+ num_stats);
+
+ if (num_stats == num_stats_returned) {
+ num_stats_returned = rte_event_dev_xstats_get(0,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ i,
+ id,
+ values,
+ num_stats);
+
+ if (num_stats == num_stats_returned) {
+ int err;
+
+ err = check_qid_stats(id, i);
+
+ if (err)
+ return err;
+
+ } else {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+#define OLD_NUM_PACKETS 3
+#define NEW_NUM_PACKETS 2
+static int
+single_link_w_stats(struct test *t)
+{
+ const uint8_t rx_port = 0;
+ const uint8_t w1_port = 1;
+ const uint8_t tx_port = 2;
+ int err;
+ int i;
+ uint32_t deq_pkts;
+ struct rte_mbuf *mbufs[3];
+ RTE_SET_USED(mbufs);
+
+ /* Create instance with 3 ports */
+ if (init(t, 2, tx_port + 1) < 0 ||
+ create_ports(t, 3) < 0 || /* 0,1,2 */
+ create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
+ create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
+ PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+
+ /*
+ *
+ * Simplified test setup diagram:
+ *
+ * rx_port(0)
+ * \
+ * qid0 - w1_port(1) - qid1
+ * \
+ * tx_port(2)
+ */
+
+ err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
+ __LINE__,
+ t->port[1],
+ t->qid[0]);
+ cleanup(t);
+ return -1;
+ }
+
+ err = rte_event_port_link(evdev, t->port[2], &t->qid[1], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
+ __LINE__,
+ t->port[2],
+ t->qid[1]);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) != 0) {
+ PMD_DRV_LOG(ERR, "%d: failed to start device\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ /*
+ * Enqueue 3 packets to the rx port
+ */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+ if (!mbufs[i]) {
+ PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = mbufs[i];
+ mbufs[i]->seqn = 1234 + i;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
+ __LINE__,
+ t->port[rx_port],
+ err);
+ return -1;
+ }
+ }
+
+ /* Dequeue the 3 packets, from SINGLE_LINK worker port */
+ struct rte_event deq_ev[3];
+
+ deq_pkts = rte_event_dequeue_burst(evdev,
+ t->port[w1_port],
+ deq_ev, 3, 0);
+
+ if (deq_pkts != 3) {
+ PMD_DRV_LOG(ERR, "%d: deq not 3 !\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ /* Just enqueue 2 onto new ring */
+ for (i = 0; i < NEW_NUM_PACKETS; i++)
+ deq_ev[i].queue_id = t->qid[1];
+
+ deq_pkts = rte_event_enqueue_burst(evdev,
+ t->port[w1_port],
+ deq_ev,
+ NEW_NUM_PACKETS);
+
+ if (deq_pkts != 2) {
+ PMD_DRV_LOG(ERR, "%d: enq not 2 but %u!\n", __LINE__, deq_pkts);
+ cleanup(t);
+ return -1;
+ }
+
+ /* dequeue from the tx ports, we should get 2 packets */
+ deq_pkts = rte_event_dequeue_burst(evdev,
+ t->port[tx_port],
+ deq_ev,
+ 3,
+ 0);
+
+ /* Check to see if we've got both packets */
+ if (deq_pkts != 2) {
+ PMD_DRV_LOG(ERR, "%d: expected 2 pkts at tx port got %d from port %d\n",
+ __LINE__, deq_pkts, tx_port);
+ cleanup(t);
+ return -1;
+ }
+
+ if (check_statistics() < 0) {
+ PMD_DRV_LOG(ERR, "xstats check failed");
+ cleanup(t);
+ return -1;
+ }
+
+ cleanup(t);
+
+ return 0;
+}
+
+static int
+single_link(struct test *t)
+{
+ const uint8_t tx_port = 2;
+ int err;
+ struct rte_mbuf *mbufs[3];
+ RTE_SET_USED(mbufs);
+
+ /* Create instance with 3 ports */
+ if (init(t, 2, tx_port+1) < 0 ||
+ create_ports(t, 3) < 0 || /* 0,1,2 */
+ create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
+ create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
+ PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+
+ /*
+ *
+ * Simplified test setup diagram:
+ *
+ * rx_port(0)
+ * \
+ * qid0 - w1_port(1) - qid1
+ * \
+ * tx_port(2)
+ */
+
+ err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ err = rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) == 0) {
+ PMD_DRV_LOG(ERR, "%d: start DIDN'T FAIL with more than 1 "
+ "SINGLE_LINK PORT\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ cleanup(t);
+
+ return 0;
+}
+
+
+static __rte_always_inline void
+populate_event_burst(struct rte_event ev[],
+ uint8_t qid,
+ uint16_t num_events)
+{
+ uint16_t i;
+ for (i = 0; i < num_events; i++) {
+ ev[i].flow_id = 1;
+ ev[i].op = RTE_EVENT_OP_NEW;
+ ev[i].sched_type = RTE_SCHED_TYPE_ORDERED;
+ ev[i].queue_id = qid;
+ ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev[i].sub_event_type = 0;
+ ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev[i].mbuf = (struct rte_mbuf *)0xdead0000;
+ }
+}
+
+#define NUM_QUEUES 3
+#define BATCH_SIZE 32
+
+static int
+qid_basic(struct test *t)
+{
+ int err = 0;
+
+ uint8_t q_id = 0;
+ uint8_t p_id = 0;
+
+ uint32_t num_events;
+ uint32_t i;
+
+ struct rte_event ev[BATCH_SIZE];
+
+ /* Create instance with 4 ports */
+ if (init(t, NUM_QUEUES, NUM_QUEUES+1) < 0 ||
+ create_ports(t, NUM_QUEUES+1) < 0 ||
+ create_queues_type(t, NUM_QUEUES, OPDL_Q_TYPE_ORDERED)) {
+ PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < NUM_QUEUES; i++) {
+ int nb_linked;
+ q_id = i;
+
+ nb_linked = rte_event_port_link(evdev,
+ i+1, /* port = q_id + 1*/
+ &q_id,
+ NULL,
+ 1);
+
+ if (nb_linked != 1) {
+
+ PMD_DRV_LOG(ERR, "%s:%d: error mapping port:%u to queue:%u\n",
+ __FILE__,
+ __LINE__,
+ i + 1,
+ q_id);
+
+ err = -1;
+ break;
+ }
+
+ }
+
+
+ /* Try to link the same port again */
+ if (!err) {
+ uint8_t t_qid = 0;
+ if (rte_event_port_link(evdev,
+ 1,
+ &t_qid,
+ NULL,
+ 1) > 0) {
+ PMD_DRV_LOG(ERR, "%s:%d: Second call to port link on same port DID NOT fail\n",
+ __FILE__,
+ __LINE__);
+ err = -1;
+ }
+
+ uint32_t test_num_events;
+
+ if (!err) {
+ test_num_events = rte_event_dequeue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE,
+ 0);
+ if (test_num_events != 0) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing 0 packets from port %u on stopped device\n",
+ __FILE__,
+ __LINE__,
+ p_id);
+ err = -1;
+ }
+ }
+
+ if (!err) {
+ test_num_events = rte_event_enqueue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE);
+ if (test_num_events != 0) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing 0 packets to port %u on stopped device\n",
+ __FILE__,
+ __LINE__,
+ p_id);
+ err = -1;
+ }
+ }
+ }
+
+
+ /* Start the device */
+ if (!err) {
+ if (rte_event_dev_start(evdev) < 0) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error with start call\n",
+ __FILE__,
+ __LINE__);
+ err = -1;
+ }
+ }
+
+
+ /* Check we can't do any more links now that device is started.*/
+ if (!err) {
+ uint8_t t_qid = 0;
+ if (rte_event_port_link(evdev,
+ 1,
+ &t_qid,
+ NULL,
+ 1) > 0) {
+ PMD_DRV_LOG(ERR, "%s:%d: Call to port link on started device DID NOT fail\n",
+ __FILE__,
+ __LINE__);
+ err = -1;
+ }
+ }
+
+ if (!err) {
+
+ q_id = 0;
+
+ populate_event_burst(ev,
+ q_id,
+ BATCH_SIZE);
+
+ num_events = rte_event_enqueue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE);
+ if (num_events != BATCH_SIZE) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing rx packets\n",
+ __FILE__,
+ __LINE__);
+ err = -1;
+ }
+ }
+
+ if (!err) {
+ while (++p_id < NUM_QUEUES) {
+
+ num_events = rte_event_dequeue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE,
+ 0);
+
+ if (num_events != BATCH_SIZE) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from port %u\n",
+ __FILE__,
+ __LINE__,
+ p_id);
+ err = -1;
+ break;
+ }
+
+ if (ev[0].queue_id != q_id) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error event portid[%u] q_id:[%u] does not match expected:[%u]\n",
+ __FILE__,
+ __LINE__,
+ p_id,
+ ev[0].queue_id,
+ q_id);
+ err = -1;
+ break;
+ }
+
+ populate_event_burst(ev,
+ ++q_id,
+ BATCH_SIZE);
+
+ num_events = rte_event_enqueue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE);
+ if (num_events != BATCH_SIZE) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing packets from port:%u to queue:%u\n",
+ __FILE__,
+ __LINE__,
+ p_id,
+ q_id);
+ err = -1;
+ break;
+ }
+ }
+ }
+
+ if (!err) {
+ num_events = rte_event_dequeue_burst(evdev,
+ p_id,
+ ev,
+ BATCH_SIZE,
+ 0);
+ if (num_events != BATCH_SIZE) {
+ PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from tx port %u\n",
+ __FILE__,
+ __LINE__,
+ p_id);
+ err = -1;
+ }
+ }
+
+ cleanup(t);
+
+ return err;
+}
+
+
+
+int
+opdl_selftest(void)
+{
+ struct test *t = malloc(sizeof(struct test));
+ int ret;
+
+ if (t == NULL)
+ return -1;
+
+ const char *eventdev_name = "event_opdl0";
+
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+
+ if (evdev < 0) {
+ PMD_DRV_LOG(ERR, "%d: Eventdev %s not found - creating.\n",
+ __LINE__, eventdev_name);
+ /* turn on stats by default */
+ if (rte_vdev_init(eventdev_name, "do_validation=1") < 0) {
+ PMD_DRV_LOG(ERR, "Error creating eventdev\n");
+ free(t);
+ return -1;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ PMD_DRV_LOG(ERR, "Error finding newly created eventdev\n");
+ free(t);
+ return -1;
+ }
+ }
+
+ /* Only create mbuf pool once, reuse for each test run */
+ if (!eventdev_func_mempool) {
+ eventdev_func_mempool = rte_pktmbuf_pool_create(
+ "EVENTDEV_SW_SA_MBUF_POOL",
+ (1<<12), /* 4k buffers */
+ 32 /*MBUF_CACHE_SIZE*/,
+ 0,
+ 512, /* use very small mbufs */
+ rte_socket_id());
+ if (!eventdev_func_mempool) {
+ PMD_DRV_LOG(ERR, "ERROR creating mempool\n");
+ free(t);
+ return -1;
+ }
+ }
+ t->mbuf_pool = eventdev_func_mempool;
+
+ PMD_DRV_LOG(ERR, "*** Running Ordered Basic test...\n");
+ ret = ordered_basic(t);
+
+ PMD_DRV_LOG(ERR, "*** Running Atomic Basic test...\n");
+ ret = atomic_basic(t);
+
+
+ PMD_DRV_LOG(ERR, "*** Running QID Basic test...\n");
+ ret = qid_basic(t);
+
+ PMD_DRV_LOG(ERR, "*** Running SINGLE LINK failure test...\n");
+ ret = single_link(t);
+
+ PMD_DRV_LOG(ERR, "*** Running SINGLE LINK w stats test...\n");
+ ret = single_link_w_stats(t);
+
+ /*
+ * Free test instance, free mempool
+ */
+ rte_mempool_free(t->mbuf_pool);
+ free(t);
+
+ return ret;
+}
diff --git a/drivers/event/opdl/rte_pmd_evdev_opdl_version.map b/drivers/event/opdl/rte_pmd_evdev_opdl_version.map
new file mode 100644
index 00000000..58b94270
--- /dev/null
+++ b/drivers/event/opdl/rte_pmd_evdev_opdl_version.map
@@ -0,0 +1,3 @@
+DPDK_18.02 {
+ local: *;
+};
diff --git a/drivers/event/skeleton/Makefile b/drivers/event/skeleton/Makefile
index a24738b1..0f7f07ea 100644
--- a/drivers/event/skeleton/Makefile
+++ b/drivers/event/skeleton/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016 Cavium, Inc. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Cavium, Inc nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Cavium, Inc
#
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/event/skeleton/meson.build b/drivers/event/skeleton/meson.build
new file mode 100644
index 00000000..acfe1565
--- /dev/null
+++ b/drivers/event/skeleton/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('skeleton_eventdev.c')
+deps += ['bus_pci', 'bus_vdev']
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index bb554c36..7f467568 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#include <assert.h>
@@ -237,6 +209,7 @@ skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
port_conf->new_event_threshold = 32 * 1024;
port_conf->dequeue_depth = 16;
port_conf->enqueue_depth = 16;
+ port_conf->disable_implicit_release = 0;
}
static void
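The new disable_implicit_release field defaults to 0 here, keeping the existing behaviour in which each dequeue implicitly releases the previously dequeued events. An application that wants to issue RTE_EVENT_OP_RELEASE itself would flip the flag at port-setup time; a minimal sketch, assuming dev_id names an eventdev already configured with rte_event_dev_configure():

#include <rte_eventdev.h>

/* Sketch: opt port 0 out of implicit releases. */
static int
setup_port_no_implicit_release(uint8_t dev_id)
{
	struct rte_event_port_conf conf;

	rte_event_port_default_conf_get(dev_id, 0, &conf);
	conf.disable_implicit_release = 1; /* app enqueues OP_RELEASE itself */
	return rte_event_port_setup(dev_id, 0, &conf);
}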
diff --git a/drivers/event/skeleton/skeleton_eventdev.h b/drivers/event/skeleton/skeleton_eventdev.h
index 32064721..ba64b8ae 100644
--- a/drivers/event/skeleton/skeleton_eventdev.h
+++ b/drivers/event/skeleton/skeleton_eventdev.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#ifndef __SKELETON_EVENTDEV_H__
diff --git a/drivers/event/sw/Makefile b/drivers/event/sw/Makefile
index 2f2b67ba..81236a39 100644
--- a/drivers/event/sw/Makefile
+++ b/drivers/event/sw/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016-2017 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
@@ -34,6 +7,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_sw_event.a
# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
# for older GCC versions, allow us to initialize an event using
@@ -44,6 +18,7 @@ CFLAGS += -Wno-missing-field-initializers
endif
endif
LDLIBS += -lrte_eal -lrte_eventdev -lrte_kvargs -lrte_ring
+LDLIBS += -lrte_mempool -lrte_mbuf
LDLIBS += -lrte_bus_vdev
# library version
@@ -57,6 +32,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_worker.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_scheduler.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_xstats.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_selftest.c
# export include files
SYMLINK-y-include +=
diff --git a/drivers/event/sw/event_ring.h b/drivers/event/sw/event_ring.h
index 734a3b4b..02308728 100644
--- a/drivers/event/sw/event_ring.h
+++ b/drivers/event/sw/event_ring.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
/*
diff --git a/drivers/event/sw/iq_chunk.h b/drivers/event/sw/iq_chunk.h
new file mode 100644
index 00000000..31d013ea
--- /dev/null
+++ b/drivers/event/sw/iq_chunk.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _IQ_CHUNK_H_
+#define _IQ_CHUNK_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <rte_eventdev.h>
+
+#define IQ_ROB_NAMESIZE 12
+
+struct sw_queue_chunk {
+ struct rte_event events[SW_EVS_PER_Q_CHUNK];
+ struct sw_queue_chunk *next;
+} __rte_cache_aligned;
+
+static __rte_always_inline bool
+iq_empty(const struct sw_iq *iq)
+{
+ return (iq->count == 0);
+}
+
+static __rte_always_inline uint16_t
+iq_count(const struct sw_iq *iq)
+{
+ return iq->count;
+}
+
+static __rte_always_inline struct sw_queue_chunk *
+iq_alloc_chunk(struct sw_evdev *sw)
+{
+ struct sw_queue_chunk *chunk = sw->chunk_list_head;
+ sw->chunk_list_head = chunk->next;
+ chunk->next = NULL;
+ return chunk;
+}
+
+static __rte_always_inline void
+iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
+{
+ chunk->next = sw->chunk_list_head;
+ sw->chunk_list_head = chunk;
+}
+
+static __rte_always_inline void
+iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
+{
+ while (head) {
+ struct sw_queue_chunk *next;
+ next = head->next;
+ iq_free_chunk(sw, head);
+ head = next;
+ }
+}
+
+static __rte_always_inline void
+iq_init(struct sw_evdev *sw, struct sw_iq *iq)
+{
+ iq->head = iq_alloc_chunk(sw);
+ iq->tail = iq->head;
+ iq->head_idx = 0;
+ iq->tail_idx = 0;
+ iq->count = 0;
+}
+
+static __rte_always_inline void
+iq_enqueue(struct sw_evdev *sw, struct sw_iq *iq, const struct rte_event *ev)
+{
+ iq->tail->events[iq->tail_idx++] = *ev;
+ iq->count++;
+
+ if (unlikely(iq->tail_idx == SW_EVS_PER_Q_CHUNK)) {
+ /* The number of chunks is defined in relation to the total
+		 * number of inflight events and number of IQs such that
+ * allocation will always succeed.
+ */
+ struct sw_queue_chunk *chunk = iq_alloc_chunk(sw);
+ iq->tail->next = chunk;
+ iq->tail = chunk;
+ iq->tail_idx = 0;
+ }
+}
+
+static __rte_always_inline void
+iq_pop(struct sw_evdev *sw, struct sw_iq *iq)
+{
+ iq->head_idx++;
+ iq->count--;
+
+ if (unlikely(iq->head_idx == SW_EVS_PER_Q_CHUNK)) {
+ struct sw_queue_chunk *next = iq->head->next;
+ iq_free_chunk(sw, iq->head);
+ iq->head = next;
+ iq->head_idx = 0;
+ }
+}
+
+static __rte_always_inline const struct rte_event *
+iq_peek(struct sw_iq *iq)
+{
+ return &iq->head->events[iq->head_idx];
+}
+
+/* Note: the caller must ensure that count <= iq_count() */
+static __rte_always_inline uint16_t
+iq_dequeue_burst(struct sw_evdev *sw,
+ struct sw_iq *iq,
+ struct rte_event *ev,
+ uint16_t count)
+{
+ struct sw_queue_chunk *current;
+ uint16_t total, index;
+
+ count = RTE_MIN(count, iq_count(iq));
+
+ current = iq->head;
+ index = iq->head_idx;
+ total = 0;
+
+ /* Loop over the chunks */
+ while (1) {
+ struct sw_queue_chunk *next;
+ for (; index < SW_EVS_PER_Q_CHUNK;) {
+ ev[total++] = current->events[index++];
+
+ if (unlikely(total == count))
+ goto done;
+ }
+
+ /* Move to the next chunk */
+ next = current->next;
+ iq_free_chunk(sw, current);
+ current = next;
+ index = 0;
+ }
+
+done:
+ if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
+ struct sw_queue_chunk *next = current->next;
+ iq_free_chunk(sw, current);
+ iq->head = next;
+ iq->head_idx = 0;
+ } else {
+ iq->head = current;
+ iq->head_idx = index;
+ }
+
+ iq->count -= total;
+
+ return total;
+}
+
+static __rte_always_inline void
+iq_put_back(struct sw_evdev *sw,
+ struct sw_iq *iq,
+ struct rte_event *ev,
+ unsigned int count)
+{
+ /* Put back events that fit in the current head chunk. If necessary,
+ * put back events in a new head chunk. The caller must ensure that
+ * count <= SW_EVS_PER_Q_CHUNK, to ensure that at most one new head is
+ * needed.
+ */
+ uint16_t avail_space = iq->head_idx;
+
+ if (avail_space >= count) {
+ const uint16_t idx = avail_space - count;
+ uint16_t i;
+
+ for (i = 0; i < count; i++)
+ iq->head->events[idx + i] = ev[i];
+
+ iq->head_idx = idx;
+	} else {
+ const uint16_t remaining = count - avail_space;
+ struct sw_queue_chunk *new_head;
+ uint16_t i;
+
+ for (i = 0; i < avail_space; i++)
+ iq->head->events[i] = ev[remaining + i];
+
+ new_head = iq_alloc_chunk(sw);
+ new_head->next = iq->head;
+ iq->head = new_head;
+ iq->head_idx = SW_EVS_PER_Q_CHUNK - remaining;
+
+ for (i = 0; i < remaining; i++)
+ iq->head->events[iq->head_idx + i] = ev[i];
+ }
+
+ iq->count += count;
+}
+
+#endif /* _IQ_CHUNK_H_ */
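Taken together, these helpers implement an unbounded FIFO built from fixed-size chunks drawn from a device-wide free list: iq_init() seeds head and tail with one chunk, iq_enqueue() links in a fresh chunk whenever the tail fills, and the dequeue/pop paths hand drained chunks back. A minimal sketch of the intended call sequence, assuming sw and iq come from an sw_evdev that has already provisioned its chunk pool (see sw_dev_configure() further down):

/* Sketch: one producer/consumer round trip through a sw_iq. */
static void
iq_round_trip(struct sw_evdev *sw, struct sw_iq *iq,
	      const struct rte_event *in, struct rte_event *out)
{
	iq_init(sw, iq);        /* takes the first chunk off sw's free list */
	iq_enqueue(sw, iq, in); /* links a new chunk if the tail fills up */
	if (!iq_empty(iq))
		iq_dequeue_burst(sw, iq, out, 1); /* frees drained chunks */
}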
diff --git a/drivers/event/sw/iq_ring.h b/drivers/event/sw/iq_ring.h
deleted file mode 100644
index 64cf6784..00000000
--- a/drivers/event/sw/iq_ring.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Ring structure definitions used for the internal ring buffers of the
- * SW eventdev implementation. These are designed for single-core use only.
- */
-#ifndef _IQ_RING_
-#define _IQ_RING_
-
-#include <stdint.h>
-
-#include <rte_common.h>
-#include <rte_memory.h>
-#include <rte_malloc.h>
-#include <rte_eventdev.h>
-
-#define IQ_RING_NAMESIZE 12
-#define QID_IQ_DEPTH 512
-#define QID_IQ_MASK (uint16_t)(QID_IQ_DEPTH - 1)
-
-struct iq_ring {
- char name[IQ_RING_NAMESIZE] __rte_cache_aligned;
- uint16_t write_idx;
- uint16_t read_idx;
-
- struct rte_event ring[QID_IQ_DEPTH];
-};
-
-static inline struct iq_ring *
-iq_ring_create(const char *name, unsigned int socket_id)
-{
- struct iq_ring *retval;
-
- retval = rte_malloc_socket(NULL, sizeof(*retval), 0, socket_id);
- if (retval == NULL)
- goto end;
-
- snprintf(retval->name, sizeof(retval->name), "%s", name);
- retval->write_idx = retval->read_idx = 0;
-end:
- return retval;
-}
-
-static inline void
-iq_ring_destroy(struct iq_ring *r)
-{
- rte_free(r);
-}
-
-static __rte_always_inline uint16_t
-iq_ring_count(const struct iq_ring *r)
-{
- return r->write_idx - r->read_idx;
-}
-
-static __rte_always_inline uint16_t
-iq_ring_free_count(const struct iq_ring *r)
-{
- return QID_IQ_MASK - iq_ring_count(r);
-}
-
-static __rte_always_inline uint16_t
-iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
-{
- const uint16_t read = r->read_idx;
- uint16_t write = r->write_idx;
- const uint16_t space = read + QID_IQ_MASK - write;
- uint16_t i;
-
- if (space < nb_qes)
- nb_qes = space;
-
- for (i = 0; i < nb_qes; i++, write++)
- r->ring[write & QID_IQ_MASK] = qes[i];
-
- r->write_idx = write;
-
- return nb_qes;
-}
-
-static __rte_always_inline uint16_t
-iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
-{
- uint16_t read = r->read_idx;
- const uint16_t write = r->write_idx;
- const uint16_t items = write - read;
- uint16_t i;
-
- for (i = 0; i < nb_qes; i++, read++)
- qes[i] = r->ring[read & QID_IQ_MASK];
-
- if (items < nb_qes)
- nb_qes = items;
-
- r->read_idx += nb_qes;
-
- return nb_qes;
-}
-
-/* assumes there is space, from a previous dequeue_burst */
-static __rte_always_inline uint16_t
-iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
-{
- uint16_t i, read = r->read_idx;
-
- for (i = nb_qes; i-- > 0; )
- r->ring[--read & QID_IQ_MASK] = qes[i];
-
- r->read_idx = read;
- return nb_qes;
-}
-
-static __rte_always_inline const struct rte_event *
-iq_ring_peek(const struct iq_ring *r)
-{
- return &r->ring[r->read_idx & QID_IQ_MASK];
-}
-
-static __rte_always_inline void
-iq_ring_pop(struct iq_ring *r)
-{
- r->read_idx++;
-}
-
-static __rte_always_inline int
-iq_ring_enqueue(struct iq_ring *r, const struct rte_event *qe)
-{
- const uint16_t read = r->read_idx;
- const uint16_t write = r->write_idx;
- const uint16_t space = read + QID_IQ_MASK - write;
-
- if (space == 0)
- return -1;
-
- r->ring[write & QID_IQ_MASK] = *qe;
-
- r->write_idx = write + 1;
-
- return 0;
-}
-
-#endif
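The deleted ring relied on the classic power-of-two index trick: 16-bit read/write indices wrap freely, write - read stays correct modulo 2^16, and one slot of the 512 is sacrificed (free space is computed against QID_IQ_MASK, not QID_IQ_DEPTH) so that full and empty remain distinguishable. A small self-contained illustration of the arithmetic:

#include <assert.h>
#include <stdint.h>

#define DEPTH 512
#define MASK ((uint16_t)(DEPTH - 1))

int main(void)
{
	/* both indices have wrapped past zero */
	uint16_t read = 65530, write = (uint16_t)(65530 + 7);

	assert((uint16_t)(write - read) == 7);          /* count survives wrap */
	assert((uint16_t)(read + MASK - write) == 504); /* 511 usable - 7 used */
	return 0;
}

The chunked sw_iq that replaces it trades this bounded, cache-resident layout for a list that can never refuse an enqueue.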
diff --git a/drivers/event/sw/meson.build b/drivers/event/sw/meson.build
new file mode 100644
index 00000000..30d22164
--- /dev/null
+++ b/drivers/event/sw/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+allow_experimental_apis = true
+sources = files('sw_evdev_scheduler.c',
+ 'sw_evdev_selftest.c',
+ 'sw_evdev_worker.c',
+ 'sw_evdev_xstats.c',
+ 'sw_evdev.c'
+)
+deps += ['hash', 'bus_vdev']
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index fd110797..6672fd8e 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <inttypes.h>
@@ -41,7 +13,7 @@
#include <rte_service_component.h>
#include "sw_evdev.h"
-#include "iq_ring.h"
+#include "iq_chunk.h"
#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
@@ -62,6 +34,7 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
RTE_SET_USED(priorities);
for (i = 0; i < num; i++) {
struct sw_qid *q = &sw->qids[queues[i]];
+ unsigned int j;
/* check for qid map overflow */
if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
@@ -74,6 +47,15 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
break;
}
+ for (j = 0; j < q->cq_num_mapped_cqs; j++) {
+ if (q->cq_map[j] == p->id)
+ break;
+ }
+
+ /* check if port is already linked */
+ if (j < q->cq_num_mapped_cqs)
+ continue;
+
if (q->type == SW_SCHED_TYPE_DIRECT) {
/* check directed qids only map to one port */
if (p->num_qids_mapped > 0) {
@@ -181,6 +163,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
}
p->inflight_max = conf->new_event_threshold;
+ p->implicit_release = !conf->disable_implicit_release;
/* check if ring exists, same as rx_worker above */
snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
@@ -231,18 +214,9 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
unsigned int i;
int dev_id = sw->data->dev_id;
int socket_id = sw->data->socket_id;
- char buf[IQ_RING_NAMESIZE];
+ char buf[IQ_ROB_NAMESIZE];
struct sw_qid *qid = &sw->qids[idx];
- for (i = 0; i < SW_IQS_MAX; i++) {
- snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
- qid->iq[i] = iq_ring_create(buf, socket_id);
- if (!qid->iq[i]) {
- SW_LOG_DBG("ring create failed");
- goto cleanup;
- }
- }
-
/* Initialize the FID structures to no pinning (-1), and zero packets */
const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
for (i = 0; i < RTE_DIM(qid->fids); i++)
@@ -320,11 +294,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
return 0;
cleanup:
- for (i = 0; i < SW_IQS_MAX; i++) {
- if (qid->iq[i])
- iq_ring_destroy(qid->iq[i]);
- }
-
if (qid->reorder_buffer) {
rte_free(qid->reorder_buffer);
qid->reorder_buffer = NULL;
@@ -338,6 +307,19 @@ cleanup:
return -EINVAL;
}
+static void
+sw_queue_release(struct rte_eventdev *dev, uint8_t id)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ struct sw_qid *qid = &sw->qids[id];
+
+ if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+ rte_free(qid->reorder_buffer);
+ rte_ring_free(qid->reorder_buffer_freelist);
+ }
+ memset(qid, 0, sizeof(*qid));
+}
+
static int
sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
const struct rte_event_queue_conf *conf)
@@ -355,24 +337,46 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
}
struct sw_evdev *sw = sw_pmd_priv(dev);
+
+ if (sw->qids[queue_id].initialized)
+ sw_queue_release(dev, queue_id);
+
return qid_init(sw, queue_id, type, conf);
}
static void
-sw_queue_release(struct rte_eventdev *dev, uint8_t id)
+sw_init_qid_iqs(struct sw_evdev *sw)
{
- struct sw_evdev *sw = sw_pmd_priv(dev);
- struct sw_qid *qid = &sw->qids[id];
- uint32_t i;
+ int i, j;
- for (i = 0; i < SW_IQS_MAX; i++)
- iq_ring_destroy(qid->iq[i]);
+ /* Initialize the IQ memory of all configured qids */
+ for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+ struct sw_qid *qid = &sw->qids[i];
- if (qid->type == RTE_SCHED_TYPE_ORDERED) {
- rte_free(qid->reorder_buffer);
- rte_ring_free(qid->reorder_buffer_freelist);
+ if (!qid->initialized)
+ continue;
+
+ for (j = 0; j < SW_IQS_MAX; j++)
+ iq_init(sw, &qid->iq[j]);
+ }
+}
+
+static void
+sw_clean_qid_iqs(struct sw_evdev *sw)
+{
+ int i, j;
+
+ /* Release the IQ memory of all configured qids */
+ for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+ struct sw_qid *qid = &sw->qids[i];
+
+ for (j = 0; j < SW_IQS_MAX; j++) {
+ if (!qid->iq[j].head)
+ continue;
+ iq_free_chunk_list(sw, qid->iq[j].head);
+ qid->iq[j].head = NULL;
+ }
}
- memset(qid, 0, sizeof(*qid));
}
static void
@@ -402,6 +406,7 @@ sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
port_conf->new_event_threshold = 1024;
port_conf->dequeue_depth = 16;
port_conf->enqueue_depth = 16;
+ port_conf->disable_implicit_release = 0;
}
static int
@@ -410,12 +415,36 @@ sw_dev_configure(const struct rte_eventdev *dev)
struct sw_evdev *sw = sw_pmd_priv(dev);
const struct rte_eventdev_data *data = dev->data;
const struct rte_event_dev_config *conf = &data->dev_conf;
+ int num_chunks, i;
sw->qid_count = conf->nb_event_queues;
sw->port_count = conf->nb_event_ports;
sw->nb_events_limit = conf->nb_events_limit;
rte_atomic32_set(&sw->inflights, 0);
+ /* Number of chunks sized for worst-case spread of events across IQs */
+ num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
+ sw->qid_count*SW_IQS_MAX*2;
+
+ /* If this is a reconfiguration, free the previous IQ allocation. All
+ * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
+ * will be reinitialized in sw_start().
+ */
+ if (sw->chunks)
+ rte_free(sw->chunks);
+
+ sw->chunks = rte_malloc_socket(NULL,
+ sizeof(struct sw_queue_chunk) *
+ num_chunks,
+ 0,
+ sw->data->socket_id);
+ if (!sw->chunks)
+ return -ENOMEM;
+
+ sw->chunk_list_head = NULL;
+ for (i = 0; i < num_chunks; i++)
+ iq_free_chunk(sw, &sw->chunks[i]);
+
if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
return -ENOTSUP;
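To make the worst-case sizing concrete: with SW_EVS_PER_Q_CHUNK at 255 (see sw_evdev.h below) and assuming the 18.02 values of 4096 for SW_INFLIGHT_EVENTS_TOTAL and 4 for SW_IQS_MAX, a device configured with 8 queues reserves (4096/255)+1 = 17 chunks to hold every possible inflight event, plus 8*4*2 = 64 slack chunks so each IQ can keep a partially filled head and tail, 81 chunks in all. At 4 KB per chunk that is roughly 324 KB, allocated once here so that iq_enqueue() can assume allocation always succeeds.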
@@ -450,9 +479,14 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
.max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
- .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
- RTE_EVENT_DEV_CAP_BURST_MODE |
- RTE_EVENT_DEV_CAP_EVENT_QOS),
+ .event_dev_cap = (
+ RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
+ RTE_EVENT_DEV_CAP_EVENT_QOS |
+			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
+ RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE),
};
*info = evdev_sw_info;
@@ -589,17 +623,16 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
uint32_t iq;
uint32_t iq_printed = 0;
for (iq = 0; iq < SW_IQS_MAX; iq++) {
- if (!qid->iq[iq]) {
+ if (!qid->iq[iq].head) {
fprintf(f, "\tiq %d is not initialized.\n", iq);
iq_printed = 1;
continue;
}
- uint32_t used = iq_ring_count(qid->iq[iq]);
- uint32_t free = iq_ring_free_count(qid->iq[iq]);
- const char *col = (free == 0) ? COL_RED : COL_RESET;
+ uint32_t used = iq_count(&qid->iq[iq]);
+ const char *col = COL_RESET;
if (used > 0) {
- fprintf(f, "\t%siq %d: Used %d\tFree %d"
- COL_RESET"\n", col, iq, used, free);
+ fprintf(f, "\t%siq %d: Used %d"
+ COL_RESET"\n", col, iq, used);
iq_printed = 1;
}
}
@@ -632,8 +665,8 @@ sw_start(struct rte_eventdev *dev)
/* check all queues are configured and mapped to ports*/
for (i = 0; i < sw->qid_count; i++)
- if (sw->qids[i].iq[0] == NULL ||
- sw->qids[i].cq_num_mapped_cqs == 0) {
+ if (!sw->qids[i].initialized ||
+ sw->qids[i].cq_num_mapped_cqs == 0) {
SW_LOG_ERR("Queue %d not configured\n", i);
return -ENOLINK;
}
@@ -654,6 +687,8 @@ sw_start(struct rte_eventdev *dev)
}
}
+ sw_init_qid_iqs(sw);
+
if (sw_xstats_init(sw) < 0)
return -EINVAL;
@@ -667,6 +702,7 @@ static void
sw_stop(struct rte_eventdev *dev)
{
struct sw_evdev *sw = sw_pmd_priv(dev);
+ sw_clean_qid_iqs(sw);
sw_xstats_uninit(sw);
sw->started = 0;
rte_smp_wmb();
@@ -759,6 +795,8 @@ sw_probe(struct rte_vdev_device *vdev)
.xstats_get_names = sw_xstats_get_names,
.xstats_get_by_name = sw_xstats_get_by_name,
.xstats_reset = sw_xstats_reset,
+
+ .dev_selftest = test_sw_eventdev,
};
static const char *const args[] = {
@@ -891,3 +929,15 @@ static struct rte_vdev_driver evdev_sw_pmd_drv = {
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>");
+
+/* declared extern in header, for access from other .c files */
+int eventdev_sw_log_level;
+
+RTE_INIT(evdev_sw_init_log);
+static void
+evdev_sw_init_log(void)
+{
+ eventdev_sw_log_level = rte_log_register("pmd.event.sw");
+ if (eventdev_sw_log_level >= 0)
+ rte_log_set_level(eventdev_sw_log_level, RTE_LOG_NOTICE);
+}
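With the registration above, an instance is created from the EAL command line rather than probed from hardware. Matching the parameter string, and assuming sched_quanta and credit_quanta are the strings behind SCHED_QUANTA_ARG and CREDIT_QUANTA_ARG (their definitions sit outside these hunks), an invocation would look like:

	--vdev="event_sw0,numa_node=0,sched_quanta=64,credit_quanta=32"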
diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
index e0dec910..d90b96d4 100644
--- a/drivers/event/sw/sw_evdev.h
+++ b/drivers/event/sw/sw_evdev.h
@@ -1,38 +1,11 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#ifndef _SW_EVDEV_H_
#define _SW_EVDEV_H_
+#include "sw_evdev_log.h"
#include <rte_eventdev.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_atomic.h>
@@ -49,6 +22,10 @@
#define MAX_SW_PROD_Q_DEPTH 4096
#define SW_FRAGMENTS_MAX 16
+/* Should be power-of-two minus one, to leave room for the next pointer */
+#define SW_EVS_PER_Q_CHUNK 255
+#define SW_Q_CHUNK_SIZE ((SW_EVS_PER_Q_CHUNK + 1) * sizeof(struct rte_event))
+
/* report dequeue burst sizes in buckets */
#define SW_DEQ_STAT_BUCKET_SHIFT 2
/* how many packets pulled from port by sched */
@@ -88,26 +65,6 @@ static const uint8_t sw_qe_flag_map[] = {
QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
};
-#ifdef RTE_LIBRTE_PMD_EVDEV_SW_DEBUG
-#define SW_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
- SW_PMD_NAME, \
- __func__, __LINE__, ## args)
-
-#define SW_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
- SW_PMD_NAME, \
- __func__, __LINE__, ## args)
-#else
-#define SW_LOG_INFO(fmt, args...)
-#define SW_LOG_DBG(fmt, args...)
-#endif
-
-#define SW_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
- SW_PMD_NAME, \
- __func__, __LINE__, ## args)
-
/* Records basic event stats at a given point. Used in port and qid structs */
struct sw_point_stats {
uint64_t rx_pkts;
@@ -130,6 +87,14 @@ struct reorder_buffer_entry {
struct rte_event fragments[SW_FRAGMENTS_MAX];
};
+struct sw_iq {
+ struct sw_queue_chunk *head;
+ struct sw_queue_chunk *tail;
+ uint16_t head_idx;
+ uint16_t tail_idx;
+ uint16_t count;
+};
+
struct sw_qid {
/* set when the QID has been initialized */
uint8_t initialized;
@@ -142,7 +107,7 @@ struct sw_qid {
struct sw_point_stats stats;
/* Internal priority rings for packets */
- struct iq_ring *iq[SW_IQS_MAX];
+ struct sw_iq iq[SW_IQS_MAX];
uint32_t iq_pkt_mask; /* A mask to indicate packets in an IQ */
uint64_t iq_pkt_count[SW_IQS_MAX];
@@ -201,6 +166,7 @@ struct sw_port {
uint16_t outstanding_releases __rte_cache_aligned;
uint16_t inflight_max; /* app requested max inflights for this port */
uint16_t inflight_credits; /* num credits this port has right now */
+ uint8_t implicit_release; /* release events before dequeueing */
uint16_t last_dequeue_burst_sz; /* how big the burst was */
uint64_t last_dequeue_ticks; /* used to track burst processing time */
@@ -253,6 +219,8 @@ struct sw_evdev {
/* Internal queues - one per logical queue */
struct sw_qid qids[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;
+ struct sw_queue_chunk *chunk_list_head;
+ struct sw_queue_chunk *chunks;
/* Cache how many packets are in each cq */
uint16_t cq_ring_space[SW_PORTS_MAX] __rte_cache_aligned;
@@ -319,5 +287,6 @@ int sw_xstats_reset(struct rte_eventdev *dev,
const uint32_t ids[],
uint32_t nb_ids);
+int test_sw_eventdev(void);
#endif /* _SW_EVDEV_H_ */
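The "power-of-two minus one" comment is worth unpacking: with the 16-byte struct rte_event of this release, 255 events occupy 4080 bytes, leaving room for the 8-byte next pointer inside a cache-aligned sw_queue_chunk, and SW_Q_CHUNK_SIZE works out to (255+1)*16 = 4096 bytes, exactly one 4 KB page per chunk.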
diff --git a/drivers/event/sw/sw_evdev_log.h b/drivers/event/sw/sw_evdev_log.h
new file mode 100644
index 00000000..f76825ab
--- /dev/null
+++ b/drivers/event/sw/sw_evdev_log.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _SW_EVDEV_LOG_H_
+#define _SW_EVDEV_LOG_H_
+
+extern int eventdev_sw_log_level;
+
+#define SW_LOG_IMPL(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, eventdev_sw_log_level, "%s: " fmt "\n", \
+ __func__, ##args)
+
+#define SW_LOG_INFO(fmt, args...) \
+ SW_LOG_IMPL(INFO, fmt, ## args)
+
+#define SW_LOG_DBG(fmt, args...) \
+ SW_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define SW_LOG_ERR(fmt, args...) \
+ SW_LOG_IMPL(ERR, fmt, ## args)
+
+#endif /* _SW_EVDEV_LOG_H_ */
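These macros wrap the dynamic log type registered in sw_evdev.c, replacing the compile-time RTE_LIBRTE_PMD_EVDEV_SW_DEBUG switch that the sw_evdev.h hunk above removes. A sketch of a call site; the message text is illustrative:

	SW_LOG_ERR("queue %d not configured", i);

Because the type is registered as "pmd.event.sw" at NOTICE level, DEBUG and INFO messages stay silent until the application raises the level, e.g. with rte_log_set_level(eventdev_sw_log_level, RTE_LOG_DEBUG).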
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index 8a2c9d4f..3106eb33 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -1,40 +1,12 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_ring.h>
#include <rte_hash_crc.h>
#include <rte_event_ring.h>
#include "sw_evdev.h"
-#include "iq_ring.h"
+#include "iq_chunk.h"
#define SW_IQS_MASK (SW_IQS_MAX-1)
@@ -71,7 +43,7 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
*/
uint32_t qid_id = qid->id;
- iq_ring_dequeue_burst(qid->iq[iq_num], qes, count);
+ iq_dequeue_burst(sw, &qid->iq[iq_num], qes, count);
for (i = 0; i < count; i++) {
const struct rte_event *qe = &qes[i];
const uint16_t flow_id = SW_HASH_FLOWID(qes[i].flow_id);
@@ -130,7 +102,7 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
p->cq_buf_count = 0;
}
}
- iq_ring_put_back(qid->iq[iq_num], blocked_qes, nb_blocked);
+ iq_put_back(sw, &qid->iq[iq_num], blocked_qes, nb_blocked);
return count - nb_blocked;
}
@@ -156,7 +128,7 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
rte_ring_count(qid->reorder_buffer_freelist));
for (i = 0; i < count; i++) {
- const struct rte_event *qe = iq_ring_peek(qid->iq[iq_num]);
+ const struct rte_event *qe = iq_peek(&qid->iq[iq_num]);
uint32_t cq_check_count = 0;
uint32_t cq;
@@ -193,7 +165,7 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
(void *)&p->hist_list[head].rob_entry);
sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe;
- iq_ring_pop(qid->iq[iq_num]);
+ iq_pop(sw, &qid->iq[iq_num]);
rte_compiler_barrier();
p->inflights++;
@@ -218,8 +190,8 @@ sw_schedule_dir_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
return 0;
/* burst dequeue from the QID IQ ring */
- struct iq_ring *ring = qid->iq[iq_num];
- uint32_t ret = iq_ring_dequeue_burst(ring,
+ struct sw_iq *iq = &qid->iq[iq_num];
+ uint32_t ret = iq_dequeue_burst(sw, iq,
&port->cq_buf[port->cq_buf_count], count_free);
port->cq_buf_count += ret;
@@ -252,7 +224,7 @@ sw_schedule_qid_to_cq(struct sw_evdev *sw)
continue;
uint32_t pkts_done = 0;
- uint32_t count = iq_ring_count(qid->iq[iq_num]);
+ uint32_t count = iq_count(&qid->iq[iq_num]);
if (count > 0) {
if (type == SW_SCHED_TYPE_DIRECT)
@@ -324,22 +296,15 @@ sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end)
continue;
}
- struct sw_qid *dest_qid_ptr =
- &sw->qids[dest_qid];
- const struct iq_ring *dest_iq_ptr =
- dest_qid_ptr->iq[dest_iq];
- if (iq_ring_free_count(dest_iq_ptr) == 0)
- break;
-
pkts_iter++;
struct sw_qid *q = &sw->qids[dest_qid];
- struct iq_ring *r = q->iq[dest_iq];
+ struct sw_iq *iq = &q->iq[dest_iq];
/* we checked for space above, so enqueue must
* succeed
*/
- iq_ring_enqueue(r, qe);
+ iq_enqueue(sw, iq, qe);
q->iq_pkt_mask |= (1 << (dest_iq));
q->iq_pkt_count[dest_iq]++;
q->stats.rx_pkts++;
@@ -404,10 +369,6 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
uint32_t iq_num = PRIO_TO_IQ(qe->priority);
struct sw_qid *qid = &sw->qids[qe->queue_id];
- if ((flags & QE_FLAG_VALID) &&
- iq_ring_free_count(qid->iq[iq_num]) == 0)
- break;
-
/* now process based on flags. Note that for directed
* queues, the enqueue_flush masks off all but the
* valid flag. This makes FWD and PARTIAL enqueues just
@@ -471,7 +432,7 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
*/
qid->iq_pkt_mask |= (1 << (iq_num));
- iq_ring_enqueue(qid->iq[iq_num], qe);
+ iq_enqueue(sw, &qid->iq[iq_num], qe);
qid->iq_pkt_count[iq_num]++;
qid->stats.rx_pkts++;
pkts_iter++;
@@ -516,10 +477,7 @@ sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id)
uint32_t iq_num = PRIO_TO_IQ(qe->priority);
struct sw_qid *qid = &sw->qids[qe->queue_id];
- struct iq_ring *iq_ring = qid->iq[iq_num];
-
- if (iq_ring_free_count(iq_ring) == 0)
- break; /* move to next port */
+ struct sw_iq *iq = &qid->iq[iq_num];
port->stats.rx_pkts++;
@@ -527,7 +485,7 @@ sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id)
* into the qid at the right priority
*/
qid->iq_pkt_mask |= (1 << (iq_num));
- iq_ring_enqueue(iq_ring, qe);
+ iq_enqueue(sw, iq, qe);
qid->iq_pkt_count[iq_num]++;
qid->stats.rx_pkts++;
pkts_iter++;
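Note how every enqueue-side free-count check disappears in these scheduler hunks: with the chunk pool provisioned for the worst case in sw_dev_configure(), iq_enqueue() cannot fail, so the back-pressure breaks are no longer needed. The PRIO_TO_IQ mapping used above is defined in sw_evdev.h outside this excerpt; a sketch of the top-bits scheme it is assumed to implement:

/* Assumed mapping: fold the 8-bit event priority into SW_IQS_MAX = 4 bands,
 * so priorities 0..63 land in IQ 0 (served first) and 192..255 in IQ 3.
 */
#define PRIO_TO_IQ_SKETCH(prio) ((prio) >> 6)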
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
new file mode 100644
index 00000000..78d30e07
--- /dev/null
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -0,0 +1,3245 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_eventdev.h>
+#include <rte_pause.h>
+#include <rte_service.h>
+#include <rte_service_component.h>
+#include <rte_bus_vdev.h>
+
+#include "sw_evdev.h"
+
+#define MAX_PORTS 16
+#define MAX_QIDS 16
+#define NUM_PACKETS (1<<18)
+
+static int evdev;
+
+struct test {
+ struct rte_mempool *mbuf_pool;
+ uint8_t port[MAX_PORTS];
+ uint8_t qid[MAX_QIDS];
+ int nb_qids;
+ uint32_t service_id;
+};
+
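+/* Event reused for RTE_EVENT_OP_RELEASE enqueues throughout these tests;
+ * it is zero-initialized here and its op field is presumably set to
+ * RTE_EVENT_OP_RELEASE later in this file, outside this excerpt.
+ */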
+static struct rte_event release_ev;
+
+static inline struct rte_mbuf *
+rte_gen_arp(int portid, struct rte_mempool *mp)
+{
+ /*
+ * len = 14 + 46
+ * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
+ */
+ static const uint8_t arp_request[] = {
+ /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
+ 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
+ /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
+ 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
+ /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+ };
+ struct rte_mbuf *m;
+	int pkt_len = sizeof(arp_request);
+
+ m = rte_pktmbuf_alloc(mp);
+ if (!m)
+		return NULL;
+
+ memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
+ arp_request, pkt_len);
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ rte_pktmbuf_data_len(m) = pkt_len;
+
+ RTE_SET_USED(portid);
+
+ return m;
+}
+
+static void
+xstats_print(void)
+{
+ const uint32_t XSTATS_MAX = 1024;
+ uint32_t i;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
+ /* Device names / values */
+ int ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (ret < 0) {
+ printf("%d: xstats names get() returned error\n",
+ __LINE__);
+ return;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, ret);
+ if (ret > (signed int)XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+ for (i = 0; (signed int)i < ret; i++) {
+ printf("%d : %s : %"PRIu64"\n",
+ i, xstats_names[i].name, values[i]);
+ }
+
+ /* Port names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ xstats_names, ids, XSTATS_MAX);
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 1,
+ ids, values, ret);
+ if (ret > (signed int)XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+ for (i = 0; (signed int)i < ret; i++) {
+ printf("%d : %s : %"PRIu64"\n",
+ i, xstats_names[i].name, values[i]);
+ }
+
+ /* Queue names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+ xstats_names, ids, XSTATS_MAX);
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 1, ids, values, ret);
+ if (ret > (signed int)XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+ for (i = 0; (signed int)i < ret; i++) {
+ printf("%d : %s : %"PRIu64"\n",
+ i, xstats_names[i].name, values[i]);
+ }
+}
+
+/* initialization and config */
+static inline int
+init(struct test *t, int nb_queues, int nb_ports)
+{
+ struct rte_event_dev_config config = {
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_event_queue_flows = 1024,
+ .nb_events_limit = 4096,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+ int ret;
+
+ void *temp = t->mbuf_pool; /* save and restore mbuf pool */
+
+ memset(t, 0, sizeof(*t));
+ t->mbuf_pool = temp;
+
+ ret = rte_event_dev_configure(evdev, &config);
+ if (ret < 0)
+ printf("%d: Error configuring device\n", __LINE__);
+ return ret;
+}
+
+static inline int
+create_ports(struct test *t, int num_ports)
+{
+ int i;
+ static const struct rte_event_port_conf conf = {
+ .new_event_threshold = 1024,
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ .disable_implicit_release = 0,
+ };
+ if (num_ports > MAX_PORTS)
+ return -1;
+
+ for (i = 0; i < num_ports; i++) {
+ if (rte_event_port_setup(evdev, i, &conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+ t->port[i] = i;
+ }
+
+ return 0;
+}
+
+static inline int
+create_lb_qids(struct test *t, int num_qids, uint32_t flags)
+{
+ int i;
+
+ /* Q creation */
+ const struct rte_event_queue_conf conf = {
+ .schedule_type = flags,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+ }
+ t->nb_qids += num_qids;
+ if (t->nb_qids > MAX_QIDS)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+create_atomic_qids(struct test *t, int num_qids)
+{
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
+}
+
+static inline int
+create_ordered_qids(struct test *t, int num_qids)
+{
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
+}
+
+
+static inline int
+create_unordered_qids(struct test *t, int num_qids)
+{
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
+}
+
+static inline int
+create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
+{
+ int i;
+
+ /* Q creation */
+ static const struct rte_event_queue_conf conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+ };
+
+ for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+
+ if (rte_event_port_link(evdev, ports[i - t->nb_qids],
+ &t->qid[i], NULL, 1) != 1) {
+ printf("%d: error creating link for qid %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ }
+ t->nb_qids += num_qids;
+ if (t->nb_qids > MAX_QIDS)
+ return -1;
+
+ return 0;
+}
+
+/* destruction */
+static inline int
+cleanup(struct test *t __rte_unused)
+{
+ rte_event_dev_stop(evdev);
+ rte_event_dev_close(evdev);
+ return 0;
+}
+
+struct test_event_dev_stats {
+ uint64_t rx_pkts; /**< Total packets received */
+ uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
+ uint64_t tx_pkts; /**< Total packets transmitted */
+
+ /** Packets received on this port */
+ uint64_t port_rx_pkts[MAX_PORTS];
+ /** Packets dropped on this port */
+ uint64_t port_rx_dropped[MAX_PORTS];
+ /** Packets inflight on this port */
+ uint64_t port_inflight[MAX_PORTS];
+ /** Packets transmitted on this port */
+ uint64_t port_tx_pkts[MAX_PORTS];
+ /** Packets received on this qid */
+ uint64_t qid_rx_pkts[MAX_QIDS];
+ /** Packets dropped on this qid */
+ uint64_t qid_rx_dropped[MAX_QIDS];
+ /** Packets transmitted on this qid */
+ uint64_t qid_tx_pkts[MAX_QIDS];
+};
+
+static inline int
+test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
+{
+	uint32_t i;
+	static uint32_t total_ids[3]; /* rx, drop and tx */
+ static uint32_t port_rx_pkts_ids[MAX_PORTS];
+ static uint32_t port_rx_dropped_ids[MAX_PORTS];
+ static uint32_t port_inflight_ids[MAX_PORTS];
+ static uint32_t port_tx_pkts_ids[MAX_PORTS];
+ static uint32_t qid_rx_pkts_ids[MAX_QIDS];
+ static uint32_t qid_rx_dropped_ids[MAX_QIDS];
+ static uint32_t qid_tx_pkts_ids[MAX_QIDS];
+
+
+ stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
+ "dev_rx", &total_ids[0]);
+ stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
+ "dev_drop", &total_ids[1]);
+ stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
+ "dev_tx", &total_ids[2]);
+ for (i = 0; i < MAX_PORTS; i++) {
+ char name[32];
+ snprintf(name, sizeof(name), "port_%u_rx", i);
+ stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_rx_pkts_ids[i]);
+ snprintf(name, sizeof(name), "port_%u_drop", i);
+ stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_rx_dropped_ids[i]);
+ snprintf(name, sizeof(name), "port_%u_inflight", i);
+ stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_inflight_ids[i]);
+ snprintf(name, sizeof(name), "port_%u_tx", i);
+ stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_tx_pkts_ids[i]);
+ }
+ for (i = 0; i < MAX_QIDS; i++) {
+ char name[32];
+ snprintf(name, sizeof(name), "qid_%u_rx", i);
+ stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &qid_rx_pkts_ids[i]);
+ snprintf(name, sizeof(name), "qid_%u_drop", i);
+ stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &qid_rx_dropped_ids[i]);
+ snprintf(name, sizeof(name), "qid_%u_tx", i);
+ stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &qid_tx_pkts_ids[i]);
+ }
+
+ return 0;
+}
+
+/* run_prio_packet_test
+ * This performs a basic packet priority check on the test instance passed in.
+ * It is factored out of the main priority tests as the same tests must be
+ * performed to ensure prioritization of each type of QID.
+ *
+ * Requirements:
+ * - An initialized test structure, including mempool
+ * - t->port[0] is initialized for both Enq / Deq of packets to the QID
+ * - t->qid[0] is the QID to be tested
+ * - if LB QID, the CQ must be mapped to the QID.
+ */
+static int
+run_prio_packet_test(struct test *t)
+{
+ int err;
+ const uint32_t MAGIC_SEQN[] = {4711, 1234};
+ const uint32_t PRIORITY[] = {
+ RTE_EVENT_DEV_PRIORITY_NORMAL,
+ RTE_EVENT_DEV_PRIORITY_HIGHEST
+ };
+ unsigned int i;
+ for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
+ /* generate pkt and enqueue */
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ arp->seqn = MAGIC_SEQN[i];
+
+ ev = (struct rte_event){
+ .priority = PRIORITY[i],
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .mbuf = arp
+ };
+ err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err < 0) {
+ printf("%d: error failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ struct test_event_dev_stats stats;
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: error failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.port_rx_pkts[t->port[0]] != 2) {
+ printf("%d: error stats incorrect for directed port\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ struct rte_event ev, ev2;
+ uint32_t deq_pkts;
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
+ printf("%d: first packet out not highest priority\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ rte_pktmbuf_free(ev.mbuf);
+
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
+ printf("%d: second packet out not lower priority\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ rte_pktmbuf_free(ev2.mbuf);
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+test_single_directed_packet(struct test *t)
+{
+ const int rx_enq = 0;
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 3 directed QIDs going to 3 ports */
+ if (init(t, 3, 3) < 0 ||
+ create_ports(t, 3) < 0 ||
+ create_directed_qids(t, 3, t->port) < 0)
+ return -1;
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = wrk_enq,
+ .mbuf = arp,
+ };
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ const uint32_t MAGIC_SEQN = 4711;
+ arp->seqn = MAGIC_SEQN;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
+ if (err < 0) {
+ printf("%d: error failed to enqueue\n", __LINE__);
+ return -1;
+ }
+
+	/* Run schedule() as directed packets may need to be re-ordered */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ struct test_event_dev_stats stats;
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: error failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.port_rx_pkts[rx_enq] != 1) {
+ printf("%d: error stats incorrect for directed port\n",
+ __LINE__);
+ return -1;
+ }
+
+ uint32_t deq_pkts;
+ deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ return -1;
+ }
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_rx_pkts[wrk_enq] != 0 &&
+ stats.port_rx_pkts[wrk_enq] != 1) {
+ printf("%d: error directed stats post-dequeue\n", __LINE__);
+ return -1;
+ }
+
+ if (ev.mbuf->seqn != MAGIC_SEQN) {
+ printf("%d: error magic sequence number not dequeued\n",
+ __LINE__);
+ return -1;
+ }
+
+ rte_pktmbuf_free(ev.mbuf);
+ cleanup(t);
+ return 0;
+}
+
+static int
+test_directed_forward_credits(struct test *t)
+{
+ uint32_t i;
+ int32_t err;
+
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_directed_qids(t, 1, t->port) < 0)
+ return -1;
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = 0,
+ };
+
+ for (i = 0; i < 1000; i++) {
+ err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
+ if (err < 0) {
+ printf("%d: error failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ uint32_t deq_pkts;
+ deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ return -1;
+ }
+
+ /* re-write event to be a forward, and continue looping it */
+ ev.op = RTE_EVENT_OP_FORWARD;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+
+static int
+test_priority_directed(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_directed_qids(t, 1, t->port) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+test_priority_atomic(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* map the QID */
+ if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping qid to port\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+test_priority_ordered(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_ordered_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* map the QID */
+ if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping qid to port\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+test_priority_unordered(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_unordered_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* map the QID */
+ if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping qid to port\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+burst_packets(struct test *t)
+{
+ /************** CONFIG ****************/
+ uint32_t i;
+ int err;
+ int ret;
+
+ /* Create instance with 2 ports and 2 queues */
+ if (init(t, 2, 2) < 0 ||
+ create_ports(t, 2) < 0 ||
+ create_atomic_qids(t, 2) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
+ if (ret != 1) {
+ printf("%d: error mapping lb qid0\n", __LINE__);
+ return -1;
+ }
+ ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
+ if (ret != 1) {
+ printf("%d: error mapping lb qid1\n", __LINE__);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+ const uint32_t rx_port = 0;
+ const uint32_t NUM_PKTS = 2;
+
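+	/* alternate the two NEW events between queue 0 and queue 1, so each
+	 * port's CQ should end up with exactly one packet */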
+ for (i = 0; i < NUM_PKTS; i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: error generating pkt\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = i % 2,
+ .flow_id = i % 3,
+ .mbuf = arp,
+ };
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* Check stats for all NUM_PKTS arrived to sched core */
+ struct test_event_dev_stats stats;
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+ if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
+ printf("%d: Sched core didn't receive all %d pkts\n",
+ __LINE__, NUM_PKTS);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ uint32_t deq_pkts;
+ int p;
+
+ deq_pkts = 0;
+	/******** DEQ QID 0 *******/
+ do {
+ struct rte_event ev;
+ p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
+ deq_pkts += p;
+ rte_pktmbuf_free(ev.mbuf);
+ } while (p);
+
+ if (deq_pkts != NUM_PKTS/2) {
+		printf("%d: Half of NUM_PKTS didn't arrive at port 0\n",
+ __LINE__);
+ return -1;
+ }
+
+	/******** DEQ QID 1 *******/
+ deq_pkts = 0;
+ do {
+ struct rte_event ev;
+ p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
+ deq_pkts += p;
+ rte_pktmbuf_free(ev.mbuf);
+ } while (p);
+ if (deq_pkts != NUM_PKTS/2) {
+		printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
+ __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+abuse_inflights(struct test *t)
+{
+ const int rx_enq = 0;
+ const int wrk_enq = 2;
+ int err;
+
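+	/* verify that a RELEASE op enqueued with no preceding NEW event is
+	 * consumed gracefully, leaving rx/tx and inflight counts at zero */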
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+	/* Enqueue a RELEASE op only, with no event payload */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+
+ /* schedule */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ struct test_event_dev_stats stats;
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.rx_pkts != 0 ||
+ stats.tx_pkts != 0 ||
+ stats.port_inflight[wrk_enq] != 0) {
+ printf("%d: Sched core didn't handle pkt as expected\n",
+ __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+xstats_tests(struct test *t)
+{
+ const int wrk_enq = 2;
+ int err;
+
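+	/* validate xstats names, values and reset behaviour for the device,
+	 * port and queue modes against known-good counts */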
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ const uint32_t XSTATS_MAX = 1024;
+
+ uint32_t i;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
+ /* Device names / values */
+ int ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, xstats_names, ids, XSTATS_MAX);
+ if (ret != 6) {
+ printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, ret);
+ if (ret != 6) {
+ printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ /* Port names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (ret != 21) {
+ printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ ids, values, ret);
+ if (ret != 21) {
+ printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ /* Queue names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 0, xstats_names, ids, XSTATS_MAX);
+ if (ret != 16) {
+ printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+	/* NEGATIVE TEST: with wrong queue passed, -EINVAL should be returned */
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 1, ids, values, ret);
+ if (ret != -EINVAL) {
+		printf("%d: expected -EINVAL, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 0, ids, values, ret);
+ if (ret != 16) {
+ printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ /* enqueue packets to check values */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ ev.queue_id = t->qid[i];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ ev.flow_id = 7;
+ arp->seqn = i;
+
+ int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* Device names / values */
+ int num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats < 0)
+ goto fail;
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, num_stats);
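+	/* expected device xstats after three NEW enqueues and one scheduler
+	 * iteration: dev_rx, dev_tx, dev_drop, dev_sched_calls,
+	 * dev_sched_no_iq_enq, dev_sched_no_cq_enq */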
+ static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
+ for (i = 0; (signed int)i < ret; i++) {
+ if (expected[i] != values[i]) {
+ printf(
+				"%d: Error xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], expected[i]);
+ goto fail;
+ }
+ }
+
+ ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, NULL, 0);
+
+	/* ensure reset statistics are zeroed */
+ static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, num_stats);
+ for (i = 0; (signed int)i < ret; i++) {
+ if (expected_zero[i] != values[i]) {
+ printf(
+				"%d: Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], expected_zero[i]);
+ goto fail;
+ }
+ }
+
+ /* port reset checks */
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats < 0)
+ goto fail;
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
+ 0, ids, values, num_stats);
+
+ static const uint64_t port_expected[] = {
+ 3 /* rx */,
+ 0 /* tx */,
+ 0 /* drop */,
+ 0 /* inflights */,
+ 0 /* avg pkt cycles */,
+ 29 /* credits */,
+ 0 /* rx ring used */,
+ 4096 /* rx ring free */,
+ 0 /* cq ring used */,
+ 32 /* cq ring free */,
+ 0 /* dequeue calls */,
+ /* 10 dequeue burst buckets */
+ 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ };
+	if (ret != (int)RTE_DIM(port_expected)) {
+		printf(
+			"%s %d: wrong number of port stats (%d), expected %zu\n",
+			__func__, __LINE__, ret, RTE_DIM(port_expected));
+		goto fail;
+	}
+
+ for (i = 0; (signed int)i < ret; i++) {
+ if (port_expected[i] != values[i]) {
+ printf(
+ "%s : %d: Error stat %s is %"PRIu64
+ ", expected %"PRIu64"\n",
+ __func__, __LINE__, xstats_names[i].name,
+ values[i], port_expected[i]);
+ goto fail;
+ }
+ }
+
+ ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
+ 0, NULL, 0);
+
+	/* ensure reset statistics are zeroed */
+ static const uint64_t port_expected_zero[] = {
+ 0 /* rx */,
+ 0 /* tx */,
+ 0 /* drop */,
+ 0 /* inflights */,
+ 0 /* avg pkt cycles */,
+ 29 /* credits */,
+ 0 /* rx ring used */,
+ 4096 /* rx ring free */,
+ 0 /* cq ring used */,
+ 32 /* cq ring free */,
+ 0 /* dequeue calls */,
+ /* 10 dequeue burst buckets */
+ 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ };
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ 0, ids, values, num_stats);
+ for (i = 0; (signed int)i < ret; i++) {
+ if (port_expected_zero[i] != values[i]) {
+ printf(
+				"%d: Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], port_expected_zero[i]);
+ goto fail;
+ }
+ }
+
+ /* QUEUE STATS TESTS */
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+					xstats_names, ids, XSTATS_MAX);
+	if (num_stats < 0)
+		goto fail;
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
+ 0, ids, values, num_stats);
+ if (ret < 0) {
+ printf("xstats get returned %d\n", ret);
+ goto fail;
+ }
+ if ((unsigned int)ret > XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+
+ static const uint64_t queue_expected[] = {
+ 3 /* rx */,
+ 3 /* tx */,
+ 0 /* drop */,
+ 3 /* inflights */,
+ 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 3,
+ 0, 0,
+ };
+ for (i = 0; (signed int)i < ret; i++) {
+ if (queue_expected[i] != values[i]) {
+ printf(
+				"%d: Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], queue_expected[i]);
+ goto fail;
+ }
+ }
+
+ /* Reset the queue stats here */
+ ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+ NULL,
+ 0);
+
+	/* Verify that the resettable stats are reset, and others are not */
+ static const uint64_t queue_expected_zero[] = {
+ 0 /* rx */,
+ 0 /* tx */,
+ 0 /* drop */,
+ 3 /* inflight */,
+ 0, 0, 0, 0, /* 4 iq used */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 0,
+ 0, 0,
+ };
+
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+ ids, values, num_stats);
+ int fails = 0;
+ for (i = 0; (signed int)i < ret; i++) {
+ if (queue_expected_zero[i] != values[i]) {
+ printf(
+				"%d: Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], queue_expected_zero[i]);
+ fails++;
+ }
+ }
+ if (fails) {
+		printf("%d: %d values were not as expected above\n",
+ __LINE__, fails);
+ goto fail;
+ }
+
+ cleanup(t);
+ return 0;
+
+fail:
+	rte_event_dev_dump(evdev, stdout);
+ cleanup(t);
+ return -1;
+}
+
+static int
+xstats_id_abuse_tests(struct test *t)
+{
+ int err;
+ const uint32_t XSTATS_MAX = 1024;
+ const uint32_t link_port = 2;
+
+ uint32_t ids[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ goto fail;
+ }
+
+ err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ /* no test for device, as it ignores the port/q number */
+ int num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ UINT8_MAX-1, xstats_names, ids,
+ XSTATS_MAX);
+ if (num_stats != 0) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ 0, num_stats);
+ goto fail;
+ }
+
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ UINT8_MAX-1, xstats_names, ids,
+ XSTATS_MAX);
+ if (num_stats != 0) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ 0, num_stats);
+ goto fail;
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+port_reconfig_credits(struct test *t)
+{
+ if (init(t, 1, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ uint32_t i;
+ const uint32_t NUM_ITERS = 32;
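+	/* repeatedly configure, start, run and stop the device, sending one
+	 * packet per iteration, to verify that port credits are fully
+	 * reclaimed across reconfigurations */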
+ for (i = 0; i < NUM_ITERS; i++) {
+ const struct rte_event_queue_conf conf = {
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+ if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ return -1;
+ }
+ t->qid[0] = 0;
+
+ static const struct rte_event_port_conf port_conf = {
+ .new_event_threshold = 128,
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ .disable_implicit_release = 0,
+ };
+ if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
+ printf("%d Error setting up port\n", __LINE__);
+ return -1;
+ }
+
+ int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
+ if (links != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ const uint32_t NPKTS = 1;
+ uint32_t j;
+ for (j = 0; j < NPKTS; j++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto fail;
+ }
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+				rte_event_dev_dump(evdev, stdout);
+ goto fail;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ struct rte_event ev[NPKTS];
+ int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
+ NPKTS, 0);
+ if (deq != 1)
+			printf("%d: error, no packet dequeued\n", __LINE__);
+
+ /* let cleanup below stop the device on last iter */
+ if (i != NUM_ITERS-1)
+ rte_event_dev_stop(evdev);
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+port_single_lb_reconfig(struct test *t)
+{
+ if (init(t, 2, 2) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ goto fail;
+ }
+
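+	/* set up one load-balanced and one single-link queue, then link,
+	 * unlink and re-link the ports before starting, verifying that link
+	 * reconfiguration is accepted */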
+ static const struct rte_event_queue_conf conf_lb_atomic = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+ if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ goto fail;
+ }
+
+ static const struct rte_event_queue_conf conf_single_link = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+ };
+ if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ goto fail;
+ }
+
+ struct rte_event_port_conf port_conf = {
+ .new_event_threshold = 128,
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ .disable_implicit_release = 0,
+ };
+ if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
+ printf("%d Error setting up port\n", __LINE__);
+ goto fail;
+ }
+ if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
+ printf("%d Error setting up port\n", __LINE__);
+ goto fail;
+ }
+
+ /* link port to lb queue */
+ uint8_t queue_id = 0;
+ if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
+ printf("%d: error creating link for qid\n", __LINE__);
+ goto fail;
+ }
+
+ int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
+ if (ret != 1) {
+ printf("%d: Error unlinking lb port\n", __LINE__);
+ goto fail;
+ }
+
+ queue_id = 1;
+ if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
+ printf("%d: error creating link for qid\n", __LINE__);
+ goto fail;
+ }
+
+ queue_id = 0;
+ int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+xstats_brute_force(struct test *t)
+{
+ uint32_t i;
+ const uint32_t XSTATS_MAX = 1024;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
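+	/* sweep every xstats mode across the full id range; out-of-range
+	 * port/queue ids must be rejected gracefully rather than crash */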
+ for (i = 0; i < 3; i++) {
+ uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
+ uint32_t j;
+ for (j = 0; j < UINT8_MAX; j++) {
+ rte_event_dev_xstats_names_get(evdev, mode,
+ j, xstats_names, ids, XSTATS_MAX);
+
+ rte_event_dev_xstats_get(evdev, mode, j, ids,
+ values, XSTATS_MAX);
+ }
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+xstats_id_reset_tests(struct test *t)
+{
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+#define XSTATS_MAX 1024
+ int ret;
+ uint32_t i;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
+#define NUM_DEV_STATS 6
+ /* Device names / values */
+ int num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, xstats_names, ids, XSTATS_MAX);
+ if (num_stats != NUM_DEV_STATS) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ NUM_DEV_STATS, num_stats);
+ goto fail;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, num_stats);
+ if (ret != NUM_DEV_STATS) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ NUM_DEV_STATS, ret);
+ goto fail;
+ }
+
+#define NPKTS 7
+ for (i = 0; i < NPKTS; i++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto fail;
+ }
+ ev.queue_id = t->qid[i];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ arp->seqn = i;
+
+ int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ goto fail;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ static const char * const dev_names[] = {
+ "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
+ "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
+ };
+ uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
+ for (i = 0; (int)i < ret; i++) {
+ unsigned int id;
+ uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+ dev_names[i],
+ &id);
+ if (id != i) {
+ printf("%d: %s id incorrect, expected %d got %d\n",
+ __LINE__, dev_names[i], i, id);
+ goto fail;
+ }
+ if (val != dev_expected[i]) {
+			printf("%d: %s value incorrect, expected %"PRIu64
+					" got %"PRIu64"\n", __LINE__,
+					dev_names[i], dev_expected[i], val);
+ goto fail;
+ }
+ /* reset to zero */
+ int reset_ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+ &id,
+ 1);
+ if (reset_ret) {
+ printf("%d: failed to reset successfully\n", __LINE__);
+ goto fail;
+ }
+ dev_expected[i] = 0;
+ /* check value again */
+ val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
+ if (val != dev_expected[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %"PRIu64"\n", __LINE__, dev_names[i],
+ dev_expected[i], val);
+ goto fail;
+ }
+	}
+
+/* 48 is the stat offset from the start of the device's whole xstats.
+ * This WILL break every time we add a statistic to a port
+ * or the device, but there is no other way to test.
+ */
+#define PORT_OFF 48
+/* num stats for the tested port. CQ size adds more stats to a port */
+#define NUM_PORT_STATS 21
+/* the port to test. */
+#define PORT 2
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, PORT,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats != NUM_PORT_STATS) {
+ printf("%d: expected %d stats, got return %d\n",
+ __LINE__, NUM_PORT_STATS, num_stats);
+ goto fail;
+ }
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
+ ids, values, num_stats);
+
+ if (ret != NUM_PORT_STATS) {
+ printf("%d: expected %d stats, got return %d\n",
+ __LINE__, NUM_PORT_STATS, ret);
+ goto fail;
+ }
+ static const char * const port_names[] = {
+ "port_2_rx",
+ "port_2_tx",
+ "port_2_drop",
+ "port_2_inflight",
+ "port_2_avg_pkt_cycles",
+ "port_2_credits",
+ "port_2_rx_ring_used",
+ "port_2_rx_ring_free",
+ "port_2_cq_ring_used",
+ "port_2_cq_ring_free",
+ "port_2_dequeue_calls",
+ "port_2_dequeues_returning_0",
+ "port_2_dequeues_returning_1-4",
+ "port_2_dequeues_returning_5-8",
+ "port_2_dequeues_returning_9-12",
+ "port_2_dequeues_returning_13-16",
+ "port_2_dequeues_returning_17-20",
+ "port_2_dequeues_returning_21-24",
+ "port_2_dequeues_returning_25-28",
+ "port_2_dequeues_returning_29-32",
+ "port_2_dequeues_returning_33-36",
+ };
+ uint64_t port_expected[] = {
+ 0, /* rx */
+ NPKTS, /* tx */
+ 0, /* drop */
+ NPKTS, /* inflight */
+ 0, /* avg pkt cycles */
+ 0, /* credits */
+ 0, /* rx ring used */
+ 4096, /* rx ring free */
+ NPKTS, /* cq ring used */
+ 25, /* cq ring free */
+ 0, /* dequeue zero calls */
+ 0, 0, 0, 0, 0, /* 10 dequeue buckets */
+ 0, 0, 0, 0, 0,
+ };
+ uint64_t port_expected_zero[] = {
+ 0, /* rx */
+ 0, /* tx */
+ 0, /* drop */
+ NPKTS, /* inflight */
+ 0, /* avg pkt cycles */
+ 0, /* credits */
+ 0, /* rx ring used */
+ 4096, /* rx ring free */
+ NPKTS, /* cq ring used */
+ 25, /* cq ring free */
+ 0, /* dequeue zero calls */
+ 0, 0, 0, 0, 0, /* 10 dequeue buckets */
+ 0, 0, 0, 0, 0,
+ };
+ if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
+ RTE_DIM(port_names) != NUM_PORT_STATS) {
+ printf("%d: port array of wrong size\n", __LINE__);
+ goto fail;
+ }
+
+ int failed = 0;
+ for (i = 0; (int)i < ret; i++) {
+ unsigned int id;
+ uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+ port_names[i],
+ &id);
+ if (id != i + PORT_OFF) {
+ printf("%d: %s id incorrect, expected %d got %d\n",
+ __LINE__, port_names[i], i+PORT_OFF,
+ id);
+ failed = 1;
+ }
+ if (val != port_expected[i]) {
+			printf("%d: %s value incorrect, expected %"PRIu64
+					" got %"PRIu64"\n", __LINE__,
+					port_names[i], port_expected[i], val);
+ failed = 1;
+ }
+ /* reset to zero */
+ int reset_ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, PORT,
+ &id,
+ 1);
+ if (reset_ret) {
+ printf("%d: failed to reset successfully\n", __LINE__);
+ failed = 1;
+ }
+ /* check value again */
+ val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
+ if (val != port_expected_zero[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %"PRIu64"\n", __LINE__, port_names[i],
+ port_expected_zero[i], val);
+ failed = 1;
+ }
+	}
+ if (failed)
+ goto fail;
+
+/* num queue stats */
+#define NUM_Q_STATS 16
+/* queue offset from the start of the device's whole xstats.
+ * This will break every time we add a statistic to a device/port/queue.
+ */
+#define QUEUE_OFF 90
+ const uint32_t queue = 0;
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, queue,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats != NUM_Q_STATS) {
+ printf("%d: expected %d stats, got return %d\n",
+ __LINE__, NUM_Q_STATS, num_stats);
+ goto fail;
+ }
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
+ queue, ids, values, num_stats);
+ if (ret != NUM_Q_STATS) {
+		printf("%d: expected %d stats, got return %d\n",
+				__LINE__, NUM_Q_STATS, ret);
+ goto fail;
+ }
+ static const char * const queue_names[] = {
+ "qid_0_rx",
+ "qid_0_tx",
+ "qid_0_drop",
+ "qid_0_inflight",
+ "qid_0_iq_0_used",
+ "qid_0_iq_1_used",
+ "qid_0_iq_2_used",
+ "qid_0_iq_3_used",
+ "qid_0_port_0_pinned_flows",
+ "qid_0_port_0_packets",
+ "qid_0_port_1_pinned_flows",
+ "qid_0_port_1_packets",
+ "qid_0_port_2_pinned_flows",
+ "qid_0_port_2_packets",
+ "qid_0_port_3_pinned_flows",
+ "qid_0_port_3_packets",
+ };
+ uint64_t queue_expected[] = {
+ 7, /* rx */
+ 7, /* tx */
+ 0, /* drop */
+ 7, /* inflight */
+ 0, /* iq 0 used */
+ 0, /* iq 1 used */
+ 0, /* iq 2 used */
+ 0, /* iq 3 used */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 7,
+ 0, 0,
+ };
+ uint64_t queue_expected_zero[] = {
+ 0, /* rx */
+ 0, /* tx */
+ 0, /* drop */
+ 7, /* inflight */
+ 0, /* iq 0 used */
+ 0, /* iq 1 used */
+ 0, /* iq 2 used */
+ 0, /* iq 3 used */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 0,
+ 0, 0,
+ };
+ if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
+ RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
+ RTE_DIM(queue_names) != NUM_Q_STATS) {
+		printf("%d: queue array of wrong size\n", __LINE__);
+ goto fail;
+ }
+
+ failed = 0;
+ for (i = 0; (int)i < ret; i++) {
+ unsigned int id;
+ uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+ queue_names[i],
+ &id);
+ if (id != i + QUEUE_OFF) {
+ printf("%d: %s id incorrect, expected %d got %d\n",
+ __LINE__, queue_names[i], i+QUEUE_OFF,
+ id);
+ failed = 1;
+ }
+ if (val != queue_expected[i]) {
+			printf("%d: %s value incorrect, expected %"PRIu64
+					" got %"PRIu64"\n", __LINE__,
+					queue_names[i], queue_expected[i], val);
+ failed = 1;
+ }
+ /* reset to zero */
+ int reset_ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ queue, &id, 1);
+ if (reset_ret) {
+ printf("%d: failed to reset successfully\n", __LINE__);
+ failed = 1;
+ }
+ /* check value again */
+ val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
+ 0);
+ if (val != queue_expected_zero[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %"PRIu64"\n", __LINE__, queue_names[i],
+ queue_expected_zero[i], val);
+ failed = 1;
+ }
+	}
+
+ if (failed)
+ goto fail;
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+ordered_reconfigure(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ const struct rte_event_queue_conf conf = {
+ .schedule_type = RTE_SCHED_TYPE_ORDERED,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ goto failed;
+ }
+
+ if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+ printf("%d: error creating qid, for 2nd time\n", __LINE__);
+ goto failed;
+ }
+
+ rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+failed:
+ cleanup(t);
+ return -1;
+}
+
+static int
+qid_priorities(struct test *t)
+{
+ /* Test works by having a CQ with enough empty space for all packets,
+ * and enqueueing 3 packets to 3 QIDs. They must return based on the
+ * priority of the QID, not the ingress order, to pass the test
+ */
+ unsigned int i;
+	/* Create instance with 1 port, and 3 qids */
+ if (init(t, 3, 1) < 0 ||
+ create_ports(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < 3; i++) {
+ /* Create QID */
+ const struct rte_event_queue_conf conf = {
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ /* increase priority (0 == highest), as we go */
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+ }
+ t->nb_qids = i;
+ /* map all QIDs to port */
+ rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /* enqueue 3 packets, setting seqn and QID to check priority */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ ev.queue_id = t->qid[i];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ arp->seqn = i;
+
+ int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* dequeue packets, verify priority was upheld */
+ struct rte_event ev[32];
+ uint32_t deq_pkts =
+ rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
+ if (deq_pkts != 3) {
+ printf("%d: failed to deq packets\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ for (i = 0; i < 3; i++) {
+ if (ev[i].mbuf->seqn != 2-i) {
+ printf(
+ "%d: qid priority test: seqn %d incorrectly prioritized\n",
+				__LINE__, ev[i].mbuf->seqn);
+ }
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+load_balancing(struct test *t)
+{
+ const int rx_enq = 0;
+ int err;
+ uint32_t i;
+
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < 3; i++) {
+ /* map port 1 - 3 inclusive */
+ if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
+ NULL, 1) != 1) {
+ printf("%d: error mapping qid to port %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+ /*
+ * Create a set of flows that test the load-balancing operation of the
+ * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
+ * with a new flow, which should be sent to the 3rd mapped CQ
+ */
+ static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
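+	/* flow 0 occurs four times, flow 1 twice and flow 2 three times; with
+	 * atomic scheduling each flow pins to one worker CQ, giving the 4/2/3
+	 * inflight split checked below */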
+
+ for (i = 0; i < RTE_DIM(flows); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .flow_id = flows[i],
+ .mbuf = arp,
+ };
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ struct test_event_dev_stats stats;
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.port_inflight[1] != 4) {
+ printf("%d:%s: port 1 inflight not correct\n", __LINE__,
+ __func__);
+ return -1;
+ }
+ if (stats.port_inflight[2] != 2) {
+ printf("%d:%s: port 2 inflight not correct\n", __LINE__,
+ __func__);
+ return -1;
+ }
+ if (stats.port_inflight[3] != 3) {
+ printf("%d:%s: port 3 inflight not correct\n", __LINE__,
+ __func__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+load_balancing_history(struct test *t)
+{
+ struct test_event_dev_stats stats = {0};
+ const int rx_enq = 0;
+ int err;
+ uint32_t i;
+
+ /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0)
+ return -1;
+
+ /* CQ mapping to QID */
+ if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping port 1 qid\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping port 2 qid\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping port 3 qid\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Create a set of flows that test the load-balancing operation of the
+ * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
+ * the packet from CQ 0, send in a new set of flows. Ensure that:
+ * 1. The new flow 3 gets into the empty CQ0
+	 * 2. Packets for the existing flows get added into CQ1
+	 * 3. The next flow 0 pkt now goes onto CQ2, since CQ0 and CQ1 contain
+ * more outstanding pkts
+ *
+ * This test makes sure that when a flow ends (i.e. all packets
+ * have been completed for that flow), that the flow can be moved
+ * to a different CQ when new packets come in for that flow.
+ */
+ static uint32_t flows1[] = {0, 1, 1, 2};
+
+ for (i = 0; i < RTE_DIM(flows1); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ struct rte_event ev = {
+ .flow_id = flows1[i],
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .event_type = RTE_EVENT_TYPE_CPU,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .mbuf = arp
+ };
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ arp->hash.rss = flows1[i];
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ /* call the scheduler */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* Dequeue the flow 0 packet from port 1, so that we can then drop */
+ struct rte_event ev;
+ if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
+ printf("%d: failed to dequeue\n", __LINE__);
+ return -1;
+ }
+ if (ev.mbuf->hash.rss != flows1[0]) {
+ printf("%d: unexpected flow received\n", __LINE__);
+ return -1;
+ }
+
+ /* drop the flow 0 packet from port 1 */
+ rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
+
+ /* call the scheduler */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /*
+ * Set up the next set of flows, first a new flow to fill up
+ * CQ 0, so that the next flow 0 packet should go to CQ2
+ */
+ static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
+
+ for (i = 0; i < RTE_DIM(flows2); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ struct rte_event ev = {
+ .flow_id = flows2[i],
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .event_type = RTE_EVENT_TYPE_CPU,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .mbuf = arp
+ };
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ arp->hash.rss = flows2[i];
+
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ /* schedule */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+		printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Now check the resulting inflights on each port.
+ */
+ if (stats.port_inflight[1] != 3) {
+ printf("%d:%s: port 1 inflight not correct\n", __LINE__,
+ __func__);
+ printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+ (unsigned int)stats.port_inflight[1],
+ (unsigned int)stats.port_inflight[2],
+ (unsigned int)stats.port_inflight[3]);
+ return -1;
+ }
+ if (stats.port_inflight[2] != 4) {
+ printf("%d:%s: port 2 inflight not correct\n", __LINE__,
+ __func__);
+ printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+ (unsigned int)stats.port_inflight[1],
+ (unsigned int)stats.port_inflight[2],
+ (unsigned int)stats.port_inflight[3]);
+ return -1;
+ }
+ if (stats.port_inflight[3] != 2) {
+ printf("%d:%s: port 3 inflight not correct\n", __LINE__,
+ __func__);
+ printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+ (unsigned int)stats.port_inflight[1],
+ (unsigned int)stats.port_inflight[2],
+ (unsigned int)stats.port_inflight[3]);
+ return -1;
+ }
+
+ for (i = 1; i <= 3; i++) {
+ struct rte_event ev;
+ while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
+ rte_event_enqueue_burst(evdev, i, &release_ev, 1);
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+invalid_qid(struct test *t)
+{
+ struct test_event_dev_stats stats;
+ const int rx_enq = 0;
+ int err;
+ uint32_t i;
+
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ for (i = 0; i < 4; i++) {
+ err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
+ NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping port 1 qid\n", __LINE__);
+ return -1;
+ }
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Send in a packet with an invalid qid to the scheduler.
+	 * We should see the packet enqueued OK, but the inflights for
+ * that packet should not be incremented, and the rx_dropped
+ * should be incremented.
+ */
+ static uint32_t flows1[] = {20};
+
+ for (i = 0; i < RTE_DIM(flows1); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0] + flows1[i],
+ .flow_id = i,
+ .mbuf = arp,
+ };
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ /* call the scheduler */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Now check the resulting inflights on the port, and the rx_dropped.
+ */
+ if (stats.port_inflight[0] != 0) {
+		printf("%d:%s: port 0 inflight count not correct\n", __LINE__,
+ __func__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ if (stats.port_rx_dropped[0] != 1) {
+		printf("%d:%s: port 0 drops\n", __LINE__, __func__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ /* each packet drop should only be counted in one place - port or dev */
+ if (stats.rx_dropped != 0) {
+		printf("%d:%s: device dropped count not correct\n", __LINE__,
+ __func__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+single_packet(struct test *t)
+{
+ const uint32_t MAGIC_SEQN = 7321;
+ struct rte_event ev;
+ struct test_event_dev_stats stats;
+ const int rx_enq = 0;
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** Gen pkt and enqueue ****************/
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev.mbuf = arp;
+ ev.queue_id = 0;
+ ev.flow_id = 3;
+ arp->seqn = MAGIC_SEQN;
+
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.rx_pkts != 1 ||
+ stats.tx_pkts != 1 ||
+ stats.port_inflight[wrk_enq] != 1) {
+ printf("%d: Sched core didn't handle pkt as expected\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ uint32_t deq_pkts;
+
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
+ if (deq_pkts < 1) {
+ printf("%d: Failed to deq\n", __LINE__);
+ return -1;
+ }
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (ev.mbuf->seqn != MAGIC_SEQN) {
+ printf("%d: magic sequence number not dequeued\n", __LINE__);
+ return -1;
+ }
+
+ rte_pktmbuf_free(ev.mbuf);
+ err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[wrk_enq] != 0) {
+ printf("%d: port inflight not correct\n", __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+inflight_counts(struct test *t)
+{
+ struct rte_event ev;
+ struct test_event_dev_stats stats;
+ const int rx_enq = 0;
+ const int p1 = 1;
+ const int p2 = 2;
+ int err;
+ int i;
+
+ /* Create instance with 4 ports */
+	/* Create instance with 3 ports and 2 qids */
+ create_ports(t, 3) < 0 ||
+ create_atomic_qids(t, 2) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+ err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+#define QID1_NUM 5
+ for (i = 0; i < QID1_NUM; i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto err;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ goto err;
+ }
+ }
+#define QID2_NUM 3
+ for (i = 0; i < QID2_NUM; i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto err;
+ }
+ ev.queue_id = t->qid[1];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ goto err;
+ }
+ }
+
+ /* schedule */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ goto err;
+ }
+
+ if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
+ stats.tx_pkts != QID1_NUM + QID2_NUM) {
+ printf("%d: Sched core didn't handle pkt as expected\n",
+ __LINE__);
+ goto err;
+ }
+
+ if (stats.port_inflight[p1] != QID1_NUM) {
+ printf("%d: %s port 1 inflight not correct\n", __LINE__,
+ __func__);
+ goto err;
+ }
+ if (stats.port_inflight[p2] != QID2_NUM) {
+ printf("%d: %s port 2 inflight not correct\n", __LINE__,
+ __func__);
+ goto err;
+ }
+
+ /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
+ /* port 1 */
+ struct rte_event events[QID1_NUM + QID2_NUM];
+ uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
+ RTE_DIM(events), 0);
+
+ if (deq_pkts != QID1_NUM) {
+ printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
+ goto err;
+ }
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p1] != QID1_NUM) {
+		printf("%d: port 1 inflight changed after DEQ\n",
+ __LINE__);
+ goto err;
+ }
+ for (i = 0; i < QID1_NUM; i++) {
+ err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
+ 1);
+ if (err != 1) {
+ printf("%d: %s rte enqueue of inf release failed\n",
+ __LINE__, __func__);
+ goto err;
+ }
+ }
+
+ /*
+ * As the scheduler core decrements inflights, it needs to run to
+ * process packets to act on the drop messages
+ */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p1] != 0) {
+		printf("%d: port 1 inflight non-zero after DROP\n", __LINE__);
+ goto err;
+ }
+
+ /* port2 */
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
+ RTE_DIM(events), 0);
+ if (deq_pkts != QID2_NUM) {
+ printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
+ goto err;
+ }
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p2] != QID2_NUM) {
+		printf("%d: port 2 inflight changed after DEQ\n",
+ __LINE__);
+ goto err;
+ }
+ for (i = 0; i < QID2_NUM; i++) {
+ err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
+ 1);
+ if (err != 1) {
+ printf("%d: %s rte enqueue of inf release failed\n",
+ __LINE__, __func__);
+ goto err;
+ }
+ }
+
+ /*
+ * As the scheduler core decrements inflights, it needs to run to
+ * process packets to act on the drop messages
+ */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p2] != 0) {
+		printf("%d: port 2 inflight non-zero after DROP\n", __LINE__);
+ goto err;
+ }
+ cleanup(t);
+ return 0;
+
+err:
+ rte_event_dev_dump(evdev, stdout);
+ cleanup(t);
+ return -1;
+}
+
+static int
+parallel_basic(struct test *t, int check_order)
+{
+ const uint8_t rx_port = 0;
+ const uint8_t w1_port = 1;
+ const uint8_t w3_port = 3;
+ const uint8_t tx_port = 4;
+ int err;
+ int i;
+ uint32_t deq_pkts, j;
+ struct rte_mbuf *mbufs[3];
+ const uint32_t MAGIC_SEQN = 1234;
+
+	/* Create instance with 5 ports and 2 qids */
+ if (init(t, 2, tx_port + 1) < 0 ||
+ create_ports(t, tx_port + 1) < 0 ||
+ (check_order ? create_ordered_qids(t, 1) :
+ create_unordered_qids(t, 1)) < 0 ||
+ create_directed_qids(t, 1, &tx_port)) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * CQ mapping to QID
+ * We need three ports, all mapped to the same ordered qid0. Then we'll
+ * take a packet out to each port, re-enqueue in reverse order,
+ * then make sure the reordering has taken place properly when we
+ * dequeue from the tx_port.
+ *
+ * Simplified test setup diagram:
+ *
+ * rx_port w1_port
+ * \ / \
+ * qid0 - w2_port - qid1
+ * \ / \
+ * w3_port tx_port
+ */
+ /* CQ mapping to QID for LB ports (directed mapped on create) */
+ for (i = w1_port; i <= w3_port; i++) {
+ err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /* Enqueue 3 packets to the rx port */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+ if (!mbufs[i]) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = mbufs[i];
+ mbufs[i]->seqn = MAGIC_SEQN + i;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue pkt %u, retval = %u\n",
+ __LINE__, i, err);
+ return -1;
+ }
+ }
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* use extra slot to make logic in loops easier */
+ struct rte_event deq_ev[w3_port + 1];
+
+ /* Dequeue the 3 packets, one from each worker port */
+ for (i = w1_port; i <= w3_port; i++) {
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
+ &deq_ev[i], 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: Failed to deq\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ }
+
+ /* Enqueue each packet in reverse order, flushing after each one */
+ for (i = w3_port; i >= w1_port; i--) {
+
+ deq_ev[i].op = RTE_EVENT_OP_FORWARD;
+ deq_ev[i].queue_id = t->qid[1];
+ err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* dequeue from the tx ports, we should get 3 packets */
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
+ 3, 0);
+
+ /* Check to see if we've got all 3 packets */
+ if (deq_pkts != 3) {
+		printf("%d: expected 3 pkts at tx port, got %d from port %d\n",
+ __LINE__, deq_pkts, tx_port);
+ rte_event_dev_dump(evdev, stdout);
+		return -1;
+ }
+
+ /* Check to see if the sequence numbers are in expected order */
+ if (check_order) {
+ for (j = 0 ; j < deq_pkts ; j++) {
+ if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
+ printf(
+ "%d: Incorrect sequence number(%d) from port %d\n",
+					__LINE__, deq_ev[j].mbuf->seqn,
+					tx_port);
+ return -1;
+ }
+ }
+ }
+
+ /* Destroy the instance */
+ cleanup(t);
+ return 0;
+}
+
+static int
+ordered_basic(struct test *t)
+{
+ return parallel_basic(t, 1);
+}
+
+static int
+unordered_basic(struct test *t)
+{
+ return parallel_basic(t, 0);
+}
+
+static int
+holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
+{
+ const struct rte_event new_ev = {
+ .op = RTE_EVENT_OP_NEW
+ /* all other fields zero */
+ };
+ struct rte_event ev = new_ev;
+ unsigned int rx_port = 0; /* port we get the first flow on */
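+	/* per-port xstat names are composed at runtime, once we know which
+	 * port the first flow was scheduled to */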
+ char rx_port_used_stat[64];
+ char rx_port_free_stat[64];
+ char other_port_used_stat[64];
+
+ if (init(t, 1, 2) < 0 ||
+ create_ports(t, 2) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+ int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
+ if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
+ nb_links != 1) {
+		printf("%d: Error linking queue to ports\n", __LINE__);
+ goto err;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto err;
+ }
+
+ /* send one packet and see where it goes, port 0 or 1 */
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error doing first enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
+ != 1)
+ rx_port = 1;
+
+ snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
+ "port_%u_cq_ring_used", rx_port);
+ snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
+ "port_%u_cq_ring_free", rx_port);
+ snprintf(other_port_used_stat, sizeof(other_port_used_stat),
+ "port_%u_cq_ring_used", rx_port ^ 1);
+ if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
+ != 1) {
+ printf("%d: Error, first event not scheduled\n", __LINE__);
+ goto err;
+ }
+
+ /* now fill up the rx port's queue with one flow to cause HOLB */
+ do {
+ ev = new_ev;
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error with enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+ } while (rte_event_dev_xstats_by_name_get(evdev,
+ rx_port_free_stat, NULL) != 0);
+
+ /* one more packet, which needs to stay in IQ - i.e. HOLB */
+ ev = new_ev;
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error with enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ /* check that the other port still has an empty CQ */
+ if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
+ != 0) {
+ printf("%d: Error, second port CQ is not empty\n", __LINE__);
+ goto err;
+ }
+ /* check IQ now has one packet */
+ if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
+ != 1) {
+ printf("%d: Error, QID does not have exactly 1 packet\n",
+ __LINE__);
+ goto err;
+ }
+
+ /* send another flow, which should pass the other IQ entry */
+ ev = new_ev;
+ ev.flow_id = 1;
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error with enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
+ != 1) {
+ printf("%d: Error, second flow did not pass out first\n",
+ __LINE__);
+ goto err;
+ }
+
+ if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
+ != 1) {
+ printf("%d: Error, QID does not have exactly 1 packet\n",
+ __LINE__);
+ goto err;
+ }
+ cleanup(t);
+ return 0;
+err:
+ rte_event_dev_dump(evdev, stdout);
+ cleanup(t);
+ return -1;
+}
+
+static int
+worker_loopback_worker_fn(void *arg)
+{
+ struct test *t = arg;
+ uint8_t port = t->port[1];
+ int count = 0;
+ int enqd;
+
+ /*
+ * Takes packets from the input port and then loops them back through
+	 * the Eventdev. Each packet gets looped through QIDs 0-7, 16 times,
+	 * so each packet goes through the scheduler 8*16 = 128 times.
+ */
+ printf("%d: \tWorker function started\n", __LINE__);
+ while (count < NUM_PACKETS) {
+#define BURST_SIZE 32
+ struct rte_event ev[BURST_SIZE];
+ uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
+ BURST_SIZE, 0);
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ ev[i].queue_id++;
+ if (ev[i].queue_id != 8) {
+ ev[i].op = RTE_EVENT_OP_FORWARD;
+ enqd = rte_event_enqueue_burst(evdev, port,
+ &ev[i], 1);
+ if (enqd != 1) {
+ printf("%d: Can't enqueue FWD!!\n",
+ __LINE__);
+ return -1;
+ }
+ continue;
+ }
+
+ ev[i].queue_id = 0;
+ ev[i].mbuf->udata64++;
+ if (ev[i].mbuf->udata64 != 16) {
+ ev[i].op = RTE_EVENT_OP_FORWARD;
+ enqd = rte_event_enqueue_burst(evdev, port,
+ &ev[i], 1);
+ if (enqd != 1) {
+ printf("%d: Can't enqueue FWD!!\n",
+ __LINE__);
+ return -1;
+ }
+ continue;
+ }
+ /* we have hit 16 iterations through system - drop */
+ rte_pktmbuf_free(ev[i].mbuf);
+ count++;
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
+ if (enqd != 1) {
+				printf("%d: drop enqueue failed\n", __LINE__);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+worker_loopback_producer_fn(void *arg)
+{
+ struct test *t = arg;
+ uint8_t port = t->port[0];
+ uint64_t count = 0;
+
+ printf("%d: \tProducer function started\n", __LINE__);
+ while (count < NUM_PACKETS) {
+ struct rte_mbuf *m = 0;
+ do {
+ m = rte_pktmbuf_alloc(t->mbuf_pool);
+ } while (m == NULL);
+
+ m->udata64 = 0;
+
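+		/* derive the flow id from the mbuf address, spreading the
+		 * events across many atomic flows */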
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .flow_id = (uintptr_t)m & 0xFFFF,
+ .mbuf = m,
+ };
+
+ if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
+ while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
+ 1)
+ rte_pause();
+ }
+
+ count++;
+ }
+
+ return 0;
+}
+
+static int
+worker_loopback(struct test *t, uint8_t disable_implicit_release)
+{
+ /* use a single producer core, and a worker core to see what happens
+ * if the worker loops packets back multiple times
+ */
+ struct test_event_dev_stats stats;
+ uint64_t print_cycles = 0, cycles = 0;
+ uint64_t tx_pkts = 0;
+ int err;
+ int w_lcore, p_lcore;
+
+ if (init(t, 8, 2) < 0 ||
+ create_atomic_qids(t, 8) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* RX with low max events */
+ static struct rte_event_port_conf conf = {
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ };
+ /* beware: this cannot be initialized in the static above as it would
+ * only be initialized once - and this needs to be set for multiple runs
+ */
+ conf.new_event_threshold = 512;
+ conf.disable_implicit_release = disable_implicit_release;
+
+ if (rte_event_port_setup(evdev, 0, &conf) < 0) {
+ printf("Error setting up RX port\n");
+ return -1;
+ }
+ t->port[0] = 0;
+ /* TX with higher max events */
+ conf.new_event_threshold = 4096;
+ if (rte_event_port_setup(evdev, 1, &conf) < 0) {
+ printf("Error setting up TX port\n");
+ return -1;
+ }
+ t->port[1] = 1;
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
+	if (err != 8) { /* should have mapped all 8 queues */
+ printf("%d: error mapping port 2 to all qids\n", __LINE__);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
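+	/* pick two distinct worker lcores: one runs the producer, the other
+	 * the worker loop; the scheduler service runs on this app lcore */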
+ p_lcore = rte_get_next_lcore(
+ /* start core */ -1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
+
+ rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
+ rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
+
+ print_cycles = cycles = rte_get_timer_cycles();
+ while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
+ rte_eal_get_lcore_state(w_lcore) != FINISHED) {
+
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ uint64_t new_cycles = rte_get_timer_cycles();
+
+ if (new_cycles - print_cycles > rte_get_timer_hz()) {
+ test_event_dev_stats_get(evdev, &stats);
+ printf(
+ "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
+ __LINE__, stats.rx_pkts, stats.tx_pkts);
+
+ print_cycles = new_cycles;
+ }
+ if (new_cycles - cycles > rte_get_timer_hz() * 3) {
+ test_event_dev_stats_get(evdev, &stats);
+ if (stats.tx_pkts == tx_pkts) {
+ rte_event_dev_dump(evdev, stdout);
+ printf("Dumping xstats:\n");
+ xstats_print();
+ printf(
+				"%d: No schedules for 3 seconds, deadlock\n",
+ __LINE__);
+ return -1;
+ }
+ tx_pkts = stats.tx_pkts;
+ cycles = new_cycles;
+ }
+ }
+	/* ensure all completions are flushed */
+	rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ rte_eal_mp_wait_lcore();
+
+ cleanup(t);
+ return 0;
+}
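+
+/*
+ * Note: the sw PMD scheduler runs as a service. Above it is driven by
+ * rte_service_run_iter_on_app_lcore() from the master lcore; a deployment
+ * would more typically pin it to a dedicated service core, roughly (a
+ * sketch, assuming a spare lcore id in slcore):
+ *
+ *	rte_service_lcore_add(slcore);
+ *	rte_service_map_lcore_set(service_id, slcore, 1);
+ *	rte_service_lcore_start(slcore);
+ */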
+
+static struct rte_mempool *eventdev_func_mempool;
+
+int
+test_sw_eventdev(void)
+{
+ struct test *t;
+ int ret;
+
+ t = malloc(sizeof(struct test));
+ if (t == NULL)
+ return -1;
+	/* manually initialize the op: older gcc versions complain about
+	 * static initialization of struct members that are bitfields.
+ */
+ release_ev.op = RTE_EVENT_OP_RELEASE;
+
+ const char *eventdev_name = "event_sw";
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ printf("%d: Eventdev %s not found - creating.\n",
+ __LINE__, eventdev_name);
+ if (rte_vdev_init(eventdev_name, NULL) < 0) {
+ printf("Error creating eventdev\n");
+ goto test_fail;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ printf("Error finding newly created eventdev\n");
+ goto test_fail;
+ }
+ }
+
+ if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
+ printf("Failed to get service ID for software event dev\n");
+ goto test_fail;
+ }
+
+ rte_service_runstate_set(t->service_id, 1);
+ rte_service_set_runstate_mapped_check(t->service_id, 0);
+
+ /* Only create mbuf pool once, reuse for each test run */
+ if (!eventdev_func_mempool) {
+ eventdev_func_mempool = rte_pktmbuf_pool_create(
+ "EVENTDEV_SW_SA_MBUF_POOL",
+ (1<<12), /* 4k buffers */
+ 32 /*MBUF_CACHE_SIZE*/,
+ 0,
+ 512, /* use very small mbufs */
+ rte_socket_id());
+ if (!eventdev_func_mempool) {
+ printf("ERROR creating mempool\n");
+ goto test_fail;
+ }
+ }
+ t->mbuf_pool = eventdev_func_mempool;
+ printf("*** Running Single Directed Packet test...\n");
+ ret = test_single_directed_packet(t);
+ if (ret != 0) {
+ printf("ERROR - Single Directed Packet test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Directed Forward Credit test...\n");
+ ret = test_directed_forward_credits(t);
+ if (ret != 0) {
+ printf("ERROR - Directed Forward Credit test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Single Load Balanced Packet test...\n");
+ ret = single_packet(t);
+ if (ret != 0) {
+		printf("ERROR - Single Load Balanced Packet test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Unordered Basic test...\n");
+ ret = unordered_basic(t);
+ if (ret != 0) {
+ printf("ERROR - Unordered Basic test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Ordered Basic test...\n");
+ ret = ordered_basic(t);
+ if (ret != 0) {
+ printf("ERROR - Ordered Basic test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Burst Packets test...\n");
+ ret = burst_packets(t);
+ if (ret != 0) {
+ printf("ERROR - Burst Packets test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Load Balancing test...\n");
+ ret = load_balancing(t);
+ if (ret != 0) {
+ printf("ERROR - Load Balancing test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Prioritized Directed test...\n");
+ ret = test_priority_directed(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Directed test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Prioritized Atomic test...\n");
+ ret = test_priority_atomic(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Atomic test FAILED.\n");
+ goto test_fail;
+ }
+
+ printf("*** Running Prioritized Ordered test...\n");
+ ret = test_priority_ordered(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Ordered test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Prioritized Unordered test...\n");
+ ret = test_priority_unordered(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Unordered test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Invalid QID test...\n");
+ ret = invalid_qid(t);
+ if (ret != 0) {
+ printf("ERROR - Invalid QID test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Load Balancing History test...\n");
+ ret = load_balancing_history(t);
+ if (ret != 0) {
+ printf("ERROR - Load Balancing History test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Inflight Count test...\n");
+ ret = inflight_counts(t);
+ if (ret != 0) {
+ printf("ERROR - Inflight Count test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Abuse Inflights test...\n");
+ ret = abuse_inflights(t);
+ if (ret != 0) {
+ printf("ERROR - Abuse Inflights test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running XStats test...\n");
+ ret = xstats_tests(t);
+ if (ret != 0) {
+ printf("ERROR - XStats test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running XStats ID Reset test...\n");
+ ret = xstats_id_reset_tests(t);
+ if (ret != 0) {
+ printf("ERROR - XStats ID Reset test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running XStats Brute Force test...\n");
+ ret = xstats_brute_force(t);
+ if (ret != 0) {
+ printf("ERROR - XStats Brute Force test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running XStats ID Abuse test...\n");
+ ret = xstats_id_abuse_tests(t);
+ if (ret != 0) {
+ printf("ERROR - XStats ID Abuse test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running QID Priority test...\n");
+ ret = qid_priorities(t);
+ if (ret != 0) {
+ printf("ERROR - QID Priority test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Ordered Reconfigure test...\n");
+ ret = ordered_reconfigure(t);
+ if (ret != 0) {
+ printf("ERROR - Ordered Reconfigure test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Port LB Single Reconfig test...\n");
+ ret = port_single_lb_reconfig(t);
+ if (ret != 0) {
+ printf("ERROR - Port LB Single Reconfig test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Port Reconfig Credits test...\n");
+ ret = port_reconfig_credits(t);
+ if (ret != 0) {
+		printf("ERROR - Port Reconfig Credits test FAILED.\n");
+ goto test_fail;
+ }
+ printf("*** Running Head-of-line-blocking test...\n");
+ ret = holb(t);
+ if (ret != 0) {
+ printf("ERROR - Head-of-line-blocking test FAILED.\n");
+ goto test_fail;
+ }
+ if (rte_lcore_count() >= 3) {
+ printf("*** Running Worker loopback test...\n");
+ ret = worker_loopback(t, 0);
+ if (ret != 0) {
+ printf("ERROR - Worker loopback test FAILED.\n");
+			goto test_fail;
+ }
+
+ printf("*** Running Worker loopback test (implicit release disabled)...\n");
+ ret = worker_loopback(t, 1);
+ if (ret != 0) {
+ printf("ERROR - Worker loopback test FAILED.\n");
+ goto test_fail;
+ }
+ } else {
+ printf("### Not enough cores for worker loopback tests.\n");
+ printf("### Need at least 3 cores for the tests.\n");
+ }
+
+ /*
+ * Free test instance, leaving mempool initialized, and a pointer to it
+ * in static eventdev_func_mempool, as it is re-used on re-runs
+ */
+ free(t);
+
+ printf("SW Eventdev Selftest Successful.\n");
+ return 0;
+test_fail:
+ free(t);
+ printf("SW Eventdev Selftest Failed.\n");
+ return -1;
+}
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
index b3b3b17e..67151f77 100644
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_atomic.h>
@@ -85,6 +57,7 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
struct sw_port *p = port;
struct sw_evdev *sw = (void *)p->sw;
uint32_t sw_inflights = rte_atomic32_read(&sw->inflights);
+ uint32_t credit_update_quanta = sw->credit_update_quanta;
int new = 0;
if (num > PORT_ENQUEUE_MAX_BURST_SIZE)
@@ -98,7 +71,6 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
if (p->inflight_credits < new) {
/* check if event enqueue brings port over max threshold */
- uint32_t credit_update_quanta = sw->credit_update_quanta;
if (sw_inflights + credit_update_quanta > sw->nb_events_limit)
return 0;
@@ -109,7 +81,6 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
return 0;
}
- uint32_t forwards = 0;
for (i = 0; i < num; i++) {
int op = ev[i].op;
int outstanding = p->outstanding_releases > 0;
@@ -118,7 +89,6 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
outstanding;
- forwards += (op == RTE_EVENT_OP_FORWARD);
new_ops[i] = sw_qe_flag_map[op];
new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);
@@ -131,15 +101,12 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
p->outstanding_releases--;
/* error case: branch to avoid touching p->stats */
- if (unlikely(invalid_qid)) {
+ if (unlikely(invalid_qid && op != RTE_EVENT_OP_RELEASE)) {
p->stats.rx_dropped++;
p->inflight_credits++;
}
}
- /* handle directed port forward credits */
- p->inflight_credits -= forwards * p->is_directed;
-
/* returns number of events actually enqueued */
uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
new_ops);
@@ -152,6 +119,13 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
p->avg_pkt_ticks += burst_pkt_ticks / NUM_SAMPLES;
p->last_dequeue_ticks = 0;
}
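+
+	/*
+	 * Batching in credit_update_quanta units keeps the atomic
+	 * sw->inflights counter off the fast path: with a quanta of 32
+	 * (illustrative), a port holds up to 63 local credits before a
+	 * single atomic subtraction returns 32 of them to the device pool.
+	 */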
+
+ /* Replenish credits if enough releases are performed */
+ if (p->inflight_credits >= credit_update_quanta * 2) {
+ rte_atomic32_sub(&sw->inflights, credit_update_quanta);
+ p->inflight_credits -= credit_update_quanta;
+ }
+
return enq;
}
@@ -167,41 +141,39 @@ sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
{
RTE_SET_USED(wait);
struct sw_port *p = (void *)port;
- struct sw_evdev *sw = (void *)p->sw;
struct rte_event_ring *ring = p->cq_worker_ring;
- uint32_t credit_update_quanta = sw->credit_update_quanta;
/* check that all previous dequeues have been released */
- if (!p->is_directed) {
+ if (p->implicit_release) {
+ struct sw_evdev *sw = (void *)p->sw;
+ uint32_t credit_update_quanta = sw->credit_update_quanta;
uint16_t out_rels = p->outstanding_releases;
uint16_t i;
for (i = 0; i < out_rels; i++)
sw_event_release(p, i);
+
+ /* Replenish credits if enough releases are performed */
+ if (p->inflight_credits >= credit_update_quanta * 2) {
+ rte_atomic32_sub(&sw->inflights, credit_update_quanta);
+ p->inflight_credits -= credit_update_quanta;
+ }
}
/* returns number of events actually dequeued */
uint16_t ndeq = rte_event_ring_dequeue_burst(ring, ev, num, NULL);
if (unlikely(ndeq == 0)) {
- p->outstanding_releases = 0;
p->zero_polls++;
p->total_polls++;
goto end;
}
- /* only add credits for directed ports - LB ports send RELEASEs */
- p->inflight_credits += ndeq * p->is_directed;
- p->outstanding_releases = ndeq;
+ p->outstanding_releases += ndeq;
p->last_dequeue_burst_sz = ndeq;
p->last_dequeue_ticks = rte_get_timer_cycles();
p->poll_buckets[(ndeq - 1) >> SW_DEQ_STAT_BUCKET_SHIFT]++;
p->total_polls++;
end:
- if (p->inflight_credits >= credit_update_quanta * 2 &&
- p->inflight_credits > credit_update_quanta + ndeq) {
- rte_atomic32_sub(&sw->inflights, credit_update_quanta);
- p->inflight_credits -= credit_update_quanta;
- }
return ndeq;
}
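With outstanding_releases now accumulating across polls, a port whose
implicit_release is disabled must balance every dequeued event with a FORWARD
or RELEASE enqueue of its own. A worker-loop sketch of that contract (dev_id,
port_id and the process() predicate are placeholders, not part of this diff):

	struct rte_event ev[32];
	uint16_t i, sent = 0;
	uint16_t n = rte_event_dequeue_burst(dev_id, port_id, ev, 32, 0);

	for (i = 0; i < n; i++) {
		if (process(&ev[i])) {
			/* keep the event in flight on its next queue */
			ev[i].op = RTE_EVENT_OP_FORWARD;
		} else {
			/* done with it: free the mbuf, release the slot */
			rte_pktmbuf_free(ev[i].mbuf);
			ev[i].op = RTE_EVENT_OP_RELEASE;
		}
	}
	while (sent < n)
		sent += rte_event_enqueue_burst(dev_id, port_id,
				&ev[sent], n - sent);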
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
index 61a5c33b..7a6caa64 100644
--- a/drivers/event/sw/sw_evdev_xstats.c
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -1,38 +1,10 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_event_ring.h>
#include "sw_evdev.h"
-#include "iq_ring.h"
+#include "iq_chunk.h"
enum xstats_type {
/* common stats */
@@ -53,7 +25,6 @@ enum xstats_type {
pkt_cycles,
poll_return, /* for zero-count and used also for port bucket loop */
/* qid_specific */
- iq_size,
iq_used,
/* qid port mapping specific */
pinned,
@@ -144,7 +115,6 @@ get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
return infl;
} while (0);
break;
- case iq_size: return RTE_DIM(qid->iq[0]->ring);
default: return -1;
}
}
@@ -157,7 +127,7 @@ get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
const int iq_idx = extra_arg;
switch (type) {
- case iq_used: return iq_ring_count(qid->iq[iq_idx]);
+ case iq_used: return iq_count(&qid->iq[iq_idx]);
default: return -1;
}
}
@@ -236,13 +206,13 @@ sw_xstats_init(struct sw_evdev *sw)
/* all bucket dequeues are allowed to be reset, handled in loop below */
static const char * const qid_stats[] = {"rx", "tx", "drop",
- "inflight", "iq_size"
+ "inflight"
};
static const enum xstats_type qid_types[] = { rx, tx, dropped,
- inflight, iq_size
+ inflight
};
static const uint8_t qid_reset_allowed[] = {1, 1, 1,
- 0, 0
+ 0
};
static const char * const qid_iq_stats[] = { "used" };
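With iq_size gone, the per-queue stats exported here are rx, tx, drop and
inflight, plus a per-priority "used" depth for each internal queue. A sketch
of reading them through the generic xstats API (dev_id and queue id 0 are
assumptions; error handling elided):

	#include <stdio.h>
	#include <inttypes.h>
	#include <rte_eventdev.h>

	struct rte_event_dev_xstats_name names[64];
	unsigned int ids[64];
	uint64_t values[64];
	int i, n;

	n = rte_event_dev_xstats_names_get(dev_id,
			RTE_EVENT_DEV_XSTATS_QUEUE, 0, names, ids, 64);
	if (n > 64)
		n = 64;	/* names_get reports the full count if truncated */
	n = rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_QUEUE,
			0, ids, values, n);
	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, values[i]);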
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile
index f656c566..aae2cb10 100644
--- a/drivers/mempool/Makefile
+++ b/drivers/mempool/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright 2017 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of NXP nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/mempool/dpaa/Makefile b/drivers/mempool/dpaa/Makefile
index c49b0ee3..4c0d7aaa 100644
--- a/drivers/mempool/dpaa/Makefile
+++ b/drivers/mempool/dpaa/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright 2016 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of NXP nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index f5ee80f2..fb3b6ba0 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* System headers */
@@ -99,6 +73,8 @@ dpaa_mbuf_create_pool(struct rte_mempool *mp)
rte_dpaa_bpid_info[bpid].meta_data_size =
sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
+ rte_dpaa_bpid_info[bpid].ptov_off = 0;
+ rte_dpaa_bpid_info[bpid].flags = 0;
bp_info = rte_malloc(NULL,
sizeof(struct dpaa_bp_info),
@@ -163,17 +139,30 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool,
DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
n, bp_info->bpid);
- ret = rte_dpaa_portal_init((void *)0);
- if (ret) {
- DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
- ret);
- return 0;
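+	/* the DPAA portal is per-lcore state: initialize it lazily on the
+	 * first free from this thread instead of on every call
+	 */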
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
+ ret);
+ return 0;
+ }
}
while (i < n) {
+ uint64_t phy = rte_mempool_virt2iova(obj_table[i]);
+
+ if (unlikely(!bp_info->ptov_off)) {
+ /* buffers are from single mem segment */
+ if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
+ bp_info->ptov_off
+ = (uint64_t)obj_table[i] - phy;
+ rte_dpaa_bpid_info[bp_info->bpid].ptov_off
+ = bp_info->ptov_off;
+ }
+ }
+
dpaa_buf_free(bp_info,
- (uint64_t)rte_mempool_virt2iova(obj_table[i]) +
- bp_info->meta_data_size);
+ (uint64_t)phy + bp_info->meta_data_size);
i = i + 1;
}
@@ -206,11 +195,13 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
return -1;
}
- ret = rte_dpaa_portal_init((void *)0);
- if (ret) {
- DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
- ret);
- return -1;
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
+ ret);
+ return -1;
+ }
}
while (n < count) {
@@ -241,7 +232,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
* i.e. first buffer is valid, remaining 6 buffers
* may be null.
*/
- bufaddr = (void *)rte_dpaa_mem_ptov(bufs[i].addr);
+ bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
m[n] = (struct rte_mbuf *)((char *)bufaddr
- bp_info->meta_data_size);
DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
@@ -272,13 +263,43 @@ dpaa_mbuf_get_count(const struct rte_mempool *mp)
return bman_query_free_buffers(bp_info->bp);
}
+static int
+dpaa_register_memory_area(const struct rte_mempool *mp,
+ char *vaddr __rte_unused,
+ rte_iova_t paddr __rte_unused,
+ size_t len)
+{
+ struct dpaa_bp_info *bp_info;
+ unsigned int total_elt_sz;
+
+ MEMPOOL_INIT_FUNC_TRACE();
+
+ if (!mp || !mp->pool_data) {
+		DPAA_MEMPOOL_ERR("Invalid mempool provided");
+ return 0;
+ }
+
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+	DPAA_MEMPOOL_DEBUG("Req size %zu vs Available %u",
+			   len, total_elt_sz * mp->size);
+
+	/* Detect whether the pool area has sufficient space for all elements */
+ if (len >= total_elt_sz * mp->size)
+ bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;
+
+ return 0;
+}
+
struct rte_mempool_ops dpaa_mpool_ops = {
- .name = "dpaa",
+ .name = DPAA_MEMPOOL_OPS_NAME,
.alloc = dpaa_mbuf_create_pool,
.free = dpaa_mbuf_free_pool,
.enqueue = dpaa_mbuf_free_bulk,
.dequeue = dpaa_mbuf_alloc_bulk,
.get_count = dpaa_mbuf_get_count,
+ .register_memory_area = dpaa_register_memory_area,
};
MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
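Registering the ops under the DPAA_MEMPOOL_OPS_NAME macro instead of a bare
string literal keeps the name in one place for the bus and PMDs that reference
it. An application can still select these ops by name; a sketch, assuming the
rte_pktmbuf_pool_create_by_ops() helper and arbitrary sizing values:

	#include <rte_mbuf.h>

	struct rte_mempool *mp = rte_pktmbuf_pool_create_by_ops(
			"pkt_pool", 8192, 256, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id(),
			"dpaa");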
diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h
index 67958594..9435dd2f 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.h
+++ b/drivers/mempool/dpaa/dpaa_mempool.h
@@ -1,33 +1,7 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DPAA_MEMPOOL_H__
#define __DPAA_MEMPOOL_H__
@@ -54,6 +28,9 @@
/* Maximum release/acquire from BMAN */
#define DPAA_MBUF_MAX_ACQ_REL 8
+/* Buffers are allocated from a single mem segment, i.e. phys contiguous */
+#define DPAA_MPOOL_SINGLE_SEGMENT 0x01
+
struct dpaa_bp_info {
struct rte_mempool *mp;
struct bman_pool *bp;
@@ -61,8 +38,18 @@ struct dpaa_bp_info {
uint32_t size;
uint32_t meta_data_size;
int32_t dpaa_ops_index;
+ int64_t ptov_off;
+ uint8_t flags;
};
+static inline void *
+DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info, uint64_t addr)
+{
+ if (bp_info->ptov_off)
+ return ((void *)(addr + bp_info->ptov_off));
+ return rte_dpaa_mem_ptov(addr);
+}
+
#define DPAA_MEMPOOL_TO_POOL_INFO(__mp) \
((struct dpaa_bp_info *)__mp->pool_data)
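The ptov_off shortcut added above exploits the fact that a pool carved from a
single physically contiguous segment has one constant virtual-to-IOVA delta.
For example (addresses illustrative), a pool mapped at virtual address
0x7f2400000000 with IOVA base 0x80000000 yields
ptov_off = 0x7f2400000000 - 0x80000000 on the first buffer free, after which
every translation in DPAA_MEMPOOL_PTOV() is a single addition rather than the
memseg walk performed by rte_dpaa_mem_ptov().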
diff --git a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
index cc635c73..d05f274d 100644
--- a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
+++ b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
@@ -2,7 +2,6 @@ DPDK_17.11 {
global:
rte_dpaa_bpid_info;
- rte_dpaa_pool_table;
local: *;
};
diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile
index dd19e100..efaac96e 100644
--- a/drivers/mempool/dpaa2/Makefile
+++ b/drivers/mempool/dpaa2/Makefile
@@ -1,32 +1,6 @@
-# BSD LICENSE
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 NXP
#
-# Copyright 2016 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of NXP nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 8bcbaa89..2bd62e88 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
@@ -40,7 +14,7 @@
#include <errno.h>
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -380,7 +354,7 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
}
struct rte_mempool_ops dpaa2_mpool_ops = {
- .name = "dpaa2",
+ .name = DPAA2_MEMPOOL_OPS_NAME,
.alloc = rte_hw_mbuf_create_pool,
.free = rte_hw_mbuf_free_pool,
.enqueue = rte_hw_mbuf_free_bulk,
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
index 0971929e..4d346874 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DPAA2_HW_DPBP_H_
@@ -36,6 +10,8 @@
#define DPAA2_MAX_BUF_POOLS 8
+#define DPAA2_INVALID_MBUF_SEQN 0
+
struct buf_pool_cfg {
void *addr;
/**< The address from where DPAA2 will carve out the buffers */
diff --git a/drivers/mempool/meson.build b/drivers/mempool/meson.build
new file mode 100644
index 00000000..59918560
--- /dev/null
+++ b/drivers/mempool/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['ring', 'stack', 'octeontx']
+std_deps = ['mempool']
+config_flag_fmt = 'RTE_LIBRTE_@0@_MEMPOOL'
+driver_name_fmt = 'rte_mempool_@0@'
diff --git a/drivers/mempool/octeontx/Makefile b/drivers/mempool/octeontx/Makefile
index a2e2863c..dfc373e6 100644
--- a/drivers/mempool/octeontx/Makefile
+++ b/drivers/mempool/octeontx/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Cavium Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Cavium Networks nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
#
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/mempool/octeontx/meson.build b/drivers/mempool/octeontx/meson.build
new file mode 100644
index 00000000..1e894a56
--- /dev/null
+++ b/drivers/mempool/octeontx/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = files('octeontx_ssovf.c',
+ 'octeontx_mbox.c',
+ 'octeontx_fpavf.c',
+ 'rte_mempool_octeontx.c'
+)
+
+deps += ['mbuf', 'bus_pci']
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 3bc50f35..61c72c7c 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All Right reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#include <stdlib.h>
@@ -133,6 +105,22 @@ struct octeontx_fpadev {
static struct octeontx_fpadev fpadev;
+int octeontx_logtype_fpavf;
+int octeontx_logtype_fpavf_mbox;
+
+RTE_INIT(otx_pool_init_log);
+static void
+otx_pool_init_log(void)
+{
+ octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
+ if (octeontx_logtype_fpavf >= 0)
+ rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
+
+ octeontx_logtype_fpavf_mbox = rte_log_register("pmd.mempool.octeontx.mbox");
+ if (octeontx_logtype_fpavf_mbox >= 0)
+ rte_log_set_level(octeontx_logtype_fpavf_mbox, RTE_LOG_NOTICE);
+}
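+
+/*
+ * Either log type can be raised at runtime, e.g. programmatically:
+ *
+ *	rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_DEBUG);
+ *
+ * or, assuming the usual EAL option syntax, from the command line with
+ * --log-level=pmd.mempool.octeontx,8
+ */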
+
/* lock is taken by caller */
static int
octeontx_fpa_gpool_alloc(unsigned int object_size)
@@ -386,8 +374,8 @@ err:
return ret;
}
-static int
-octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz,
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
void *memva, uint16_t gpool)
{
uint64_t va_end;
@@ -509,12 +497,9 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
- unsigned int buf_offset, char **va_start,
- int node_id)
+ unsigned int buf_offset, int node_id)
{
unsigned int gpool;
- void *memva;
- unsigned long memsz;
uintptr_t gpool_handle;
uintptr_t pool_bar;
int res;
@@ -522,9 +507,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
RTE_SET_USED(node_id);
RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
- if (unlikely(*va_start == NULL))
- goto error_end;
-
object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
if (object_size > FPA_MAX_OBJ_SIZE) {
errno = EINVAL;
@@ -567,15 +549,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
goto error_pool_destroy;
}
- /* vf pool setup */
- memsz = object_size * object_count;
- memva = *va_start;
- res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool);
- if (res < 0) {
- errno = res;
- goto error_gaura_detach;
- }
-
/* Release lock */
rte_spinlock_unlock(&fpadev.lock);
@@ -591,8 +564,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
return gpool_handle;
-error_gaura_detach:
- (void) octeontx_fpapf_aura_detach(gpool);
error_pool_destroy:
octeontx_fpavf_free(gpool);
octeontx_fpapf_pool_destroy(gpool);
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index 1d09f007..b76f40e7 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) 2017 Cavium Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef __OCTEONTX_FPAVF_H__
@@ -114,8 +86,10 @@ do { \
uintptr_t
octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
- unsigned int buf_offset, char **va_start,
- int node);
+ unsigned int buf_offset, int node);
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
+ void *memva, uint16_t gpool);
int
octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
int
diff --git a/drivers/mempool/octeontx/octeontx_mbox.c b/drivers/mempool/octeontx/octeontx_mbox.c
index 9525da1a..f8cb6a45 100644
--- a/drivers/mempool/octeontx/octeontx_mbox.c
+++ b/drivers/mempool/octeontx/octeontx_mbox.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#include <string.h>
@@ -89,7 +61,7 @@ struct mbox_ram_hdr {
};
static inline void
-mbox_msgcpy(uint8_t *d, const uint8_t *s, uint16_t size)
+mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size)
{
uint16_t i;
@@ -128,7 +100,7 @@ mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
/* Write the msg header */
rte_write64(new_hdr.u64, ram_mbox_hdr);
- rte_io_wmb();
+ rte_smp_wmb();
/* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
rte_write64(0, m->reg);
}
diff --git a/drivers/mempool/octeontx/octeontx_mbox.h b/drivers/mempool/octeontx/octeontx_mbox.h
index 49f38257..1b056071 100644
--- a/drivers/mempool/octeontx/octeontx_mbox.h
+++ b/drivers/mempool/octeontx/octeontx_mbox.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef __OCTEONTX_MBOX_H__
diff --git a/drivers/mempool/octeontx/octeontx_pool_logs.h b/drivers/mempool/octeontx/octeontx_pool_logs.h
index 58ccb0f0..95865192 100644
--- a/drivers/mempool/octeontx/octeontx_pool_logs.h
+++ b/drivers/mempool/octeontx/octeontx_pool_logs.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) 2017 Cavium Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef __OCTEONTX_POOL_LOGS_H__
@@ -35,34 +7,25 @@
#include <rte_debug.h>
-#ifdef RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG
-#define fpavf_log_info(fmt, args...) \
- RTE_LOG(INFO, PMD, "%s() line %u: " fmt "\n", \
- __func__, __LINE__, ## args)
-#define fpavf_log_dbg(fmt, args...) \
- RTE_LOG(DEBUG, PMD, "%s() line %u: " fmt "\n", \
- __func__, __LINE__, ## args)
+#define FPAVF_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf,\
+ "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
-#define mbox_log_info(fmt, args...) \
- RTE_LOG(INFO, PMD, "%s() line %u: " fmt "\n", \
- __func__, __LINE__, ## args)
-#define mbox_log_dbg(fmt, args...) \
- RTE_LOG(DEBUG, PMD, "%s() line %u: " fmt "\n", \
- __func__, __LINE__, ## args)
-#else
-#define fpavf_log_info(fmt, args...)
-#define fpavf_log_dbg(fmt, args...)
-#define mbox_log_info(fmt, args...)
-#define mbox_log_dbg(fmt, args...)
-#endif
+#define MBOX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf_mbox,\
+ "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
+#define fpavf_log_info(fmt, ...) FPAVF_LOG(INFO, fmt, ##__VA_ARGS__)
+#define fpavf_log_dbg(fmt, ...) FPAVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define fpavf_log_err(fmt, ...) FPAVF_LOG(ERR, fmt, ##__VA_ARGS__)
#define fpavf_func_trace fpavf_log_dbg
-#define fpavf_log_err(fmt, args...) \
- RTE_LOG(ERR, PMD, "%s() line %u: " fmt "\n", \
- __func__, __LINE__, ## args)
+
+#define mbox_log_info(fmt, ...) MBOX_LOG(INFO, fmt, ##__VA_ARGS__)
+#define mbox_log_dbg(fmt, ...) MBOX_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define mbox_log_err(fmt, ...) MBOX_LOG(ERR, fmt, ##__VA_ARGS__)
#define mbox_func_trace mbox_log_dbg
-#define mbox_log_err(fmt, args...) \
- RTE_LOG(ERR, PMD, "%s() line %u: " fmt "\n", \
- __func__, __LINE__, ## args)
+
+extern int octeontx_logtype_fpavf;
+extern int octeontx_logtype_fpavf_mbox;
#endif /* __OCTEONTX_POOL_LOGS_H__*/
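The consolidated FPAVF_LOG/MBOX_LOG wrappers route every message through the
two dynamic log types declared above. A usage sketch (the message text is
invented):

	fpavf_log_err("gpool %u: pool destroy failed (%d)", gpool, ret);

expands to rte_log(RTE_LOG_ERR, octeontx_logtype_fpavf, ...) with the calling
function name and line number prepended automatically.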
diff --git a/drivers/mempool/octeontx/octeontx_ssovf.c b/drivers/mempool/octeontx/octeontx_ssovf.c
index 012c887d..97b24066 100644
--- a/drivers/mempool/octeontx/octeontx_ssovf.c
+++ b/drivers/mempool/octeontx/octeontx_ssovf.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2017.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#include <rte_atomic.h>
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index e89355cd..d143d05c 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -1,34 +1,7 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) 2017 Cavium Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
+
#include <stdio.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
@@ -36,55 +9,18 @@
#include "octeontx_fpavf.h"
-/*
- * Per-pool descriptor.
- * Links mempool with the corresponding memzone,
- * that provides memory under the pool's elements.
- */
-struct octeontx_pool_info {
- const struct rte_mempool *mp;
- uintptr_t mz_addr;
-
- SLIST_ENTRY(octeontx_pool_info) link;
-};
-
-SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
-
-/* List of the allocated pools */
-static struct octeontx_pool_list octeontx_pool_head =
- SLIST_HEAD_INITIALIZER(octeontx_pool_head);
-/* Spinlock to protect pool list */
-static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
-
static int
octeontx_fpavf_alloc(struct rte_mempool *mp)
{
uintptr_t pool;
- struct octeontx_pool_info *pool_info;
uint32_t memseg_count = mp->size;
uint32_t object_size;
- uintptr_t va_start;
int rc = 0;
- rte_spinlock_lock(&pool_list_lock);
- SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
- if (pool_info->mp == mp)
- break;
- }
- if (pool_info == NULL) {
- rte_spinlock_unlock(&pool_list_lock);
- return -ENXIO;
- }
-
- /* virtual hugepage mapped addr */
- va_start = pool_info->mz_addr;
- rte_spinlock_unlock(&pool_list_lock);
-
object_size = mp->elt_size + mp->header_size + mp->trailer_size;
pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
OCTEONTX_FPAVF_BUF_OFFSET,
- (char **)&va_start,
mp->socket_id);
rc = octeontx_fpa_bufpool_block_size(pool);
if (rc < 0)
@@ -109,27 +45,9 @@ _end:
static void
octeontx_fpavf_free(struct rte_mempool *mp)
{
- struct octeontx_pool_info *pool_info;
uintptr_t pool;
-
pool = (uintptr_t)mp->pool_id;
- rte_spinlock_lock(&pool_list_lock);
- SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
- if (pool_info->mp == mp)
- break;
- }
-
- if (pool_info == NULL) {
- rte_spinlock_unlock(&pool_list_lock);
- rte_panic("%s: trying to free pool with no valid metadata",
- __func__);
- }
-
- SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
- rte_spinlock_unlock(&pool_list_lock);
-
- rte_free(pool_info);
octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
}
@@ -222,21 +140,14 @@ static int
octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
char *vaddr, rte_iova_t paddr, size_t len)
{
- struct octeontx_pool_info *pool_info;
-
RTE_SET_USED(paddr);
- RTE_SET_USED(len);
+ uint8_t gpool;
+ uintptr_t pool_bar;
- pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
- if (pool_info == NULL)
- return -ENOMEM;
+ gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
+ pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;
- pool_info->mp = mp;
- pool_info->mz_addr = (uintptr_t)vaddr;
- rte_spinlock_lock(&pool_list_lock);
- SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
- rte_spinlock_unlock(&pool_list_lock);
- return 0;
+ return octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
}
static struct rte_mempool_ops octeontx_fpavf_ops = {
diff --git a/drivers/mempool/ring/Makefile b/drivers/mempool/ring/Makefile
index a7889b96..ddab522f 100644
--- a/drivers/mempool/ring/Makefile
+++ b/drivers/mempool/ring/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright 2017 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of NXP nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/mempool/ring/meson.build b/drivers/mempool/ring/meson.build
new file mode 100644
index 00000000..a021e908
--- /dev/null
+++ b/drivers/mempool/ring/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('rte_mempool_ring.c')
diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
index 5c132bf6..bc123fc5 100644
--- a/drivers/mempool/ring/rte_mempool_ring.c
+++ b/drivers/mempool/ring/rte_mempool_ring.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <stdio.h>
diff --git a/drivers/mempool/stack/Makefile b/drivers/mempool/stack/Makefile
index f8d6c574..0444aeda 100644
--- a/drivers/mempool/stack/Makefile
+++ b/drivers/mempool/stack/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright 2017 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of NXP nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/mempool/stack/meson.build b/drivers/mempool/stack/meson.build
new file mode 100644
index 00000000..b75a3bb5
--- /dev/null
+++ b/drivers/mempool/stack/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('rte_mempool_stack.c')
diff --git a/drivers/mempool/stack/rte_mempool_stack.c b/drivers/mempool/stack/rte_mempool_stack.c
index 817f77e6..e6d504af 100644
--- a/drivers/mempool/stack/rte_mempool_stack.c
+++ b/drivers/mempool/stack/rte_mempool_stack.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
*/
#include <stdio.h>
diff --git a/drivers/meson.build b/drivers/meson.build
new file mode 100644
index 00000000..b41a0f18
--- /dev/null
+++ b/drivers/meson.build
@@ -0,0 +1,133 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+# Defines the order in which the drivers are built.
+driver_classes = ['bus',
+ 'mempool', # depends on bus.
+ 'net', # depends on bus and mempool.
+                  'crypto',  # depends on bus, mempool (net in future).
+ 'event'] # depends on bus, mempool and net.
+
+foreach class:driver_classes
+ drivers = []
+ std_deps = []
+ config_flag_fmt = '' # format string used to set the value in dpdk_conf
+ driver_name_fmt = '' # format string for driver name, used to name
+ # the library, the dependency and to find the
+ # version file for linking
+
+ subdir(class)
+
+ foreach drv:drivers
+ drv_path = join_paths(class, drv)
+
+ # set up empty variables used for build
+ build = true # set to false to disable, e.g. missing deps
+ name = drv
+ version = 1
+ allow_experimental_apis = false
+ sources = []
+ objs = []
+ cflags = machine_args
+ includes = [include_directories(drv_path)]
+ # set up internal deps. Drivers can append/override as necessary
+ deps = std_deps
+		# ext_deps: stores external library dependencies obtained
+		# via dependency() or cc.find_library(). In most cases the
+		# matching "-l" flags must also be added to the
+		# pkgconfig_extra_libs variable so that they are reflected
+		# in the pkgconfig output for static builds
+ ext_deps = []
+ pkgconfig_extra_libs = []
+
+ # pull in driver directory which should assign to each of the above
+ subdir(drv_path)
+
+ if build
+			dpdk_conf.set(config_flag_fmt.format(name.to_upper()), 1)
+ lib_name = driver_name_fmt.format(name)
+
+ if allow_experimental_apis
+ cflags += '-DALLOW_EXPERIMENTAL_API'
+ endif
+
+ # get dependency objs from strings
+ shared_objs = []
+ static_objs = []
+ foreach d:deps
+ shared_objs += [get_variable('shared_rte_' + d)]
+ static_objs += [get_variable('static_rte_' + d)]
+ endforeach
+ shared_objs += ext_deps
+ static_objs += ext_deps
+ dpdk_extra_ldflags += pkgconfig_extra_libs
+
+ # generate pmdinfo sources by building a temporary
+ # lib and then running pmdinfogen on the contents of
+ # that lib. The final lib reuses the object files and
+ # adds in the new source file.
+ out_filename = lib_name + '.pmd.c'
+ tmp_lib = static_library('tmp_' + lib_name,
+ sources,
+ include_directories: includes,
+ dependencies: static_objs,
+ c_args: cflags)
+ objs += tmp_lib.extract_all_objects()
+ sources = custom_target(out_filename,
+ command: [pmdinfo, tmp_lib.full_path(),
+ '@OUTPUT@', pmdinfogen],
+ output: out_filename,
+ depends: [pmdinfogen, tmp_lib])
+
+ if get_option('per_library_versions')
+ lib_version = '@0@.1'.format(version)
+ so_version = '@0@'.format(version)
+ else
+ pver = meson.project_version().split('.')
+ lib_version = '@0@.@1@'.format(pver.get(0),
+ pver.get(1))
+ so_version = lib_version
+ endif
+
+ # now build the static driver
+ static_lib = static_library(lib_name,
+ sources,
+ objects: objs,
+ include_directories: includes,
+ dependencies: static_objs,
+ c_args: cflags,
+ install: true)
+
+ # now build the shared driver
+ version_map = '@0@/@1@/@2@_version.map'.format(
+ meson.current_source_dir(),
+ drv_path, lib_name)
+ shared_lib = shared_library(lib_name,
+ sources,
+ objects: objs,
+ include_directories: includes,
+ dependencies: shared_objs,
+ c_args: cflags,
+ link_args: '-Wl,--version-script=' + version_map,
+ link_depends: version_map,
+ version: lib_version,
+ soversion: so_version,
+ install: true,
+ install_dir: driver_install_path)
+
+ # create a dependency object and add it to the global dictionary so
+ # testpmd or other built-in apps can find it if necessary
+ shared_dep = declare_dependency(link_with: shared_lib,
+ include_directories: includes,
+ dependencies: shared_objs)
+ static_dep = declare_dependency(link_with: static_lib,
+ include_directories: includes,
+ dependencies: static_objs)
+
+ dpdk_drivers += static_lib
+
+ set_variable('shared_@0@'.format(lib_name), shared_dep)
+ set_variable('static_@0@'.format(lib_name), static_dep)
+ endif # build
+ endforeach
+endforeach
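A driver-level meson.build only needs to assign the variables this loop consumes (build, sources, deps, ext_deps and so on) before control returns from subdir(); the drivers/net/af_packet/meson.build added later in this patch is a minimal real instance, listing its sources and clearing build on non-Linux hosts.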
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ef09b4e1..e1127326 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2015 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
@@ -38,6 +10,7 @@ endif
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
+DIRS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf
DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
@@ -66,6 +39,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2
DIRS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap
DIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += thunderx
+DIRS-$(CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD) += vdev_netvsc
DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio
DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3
diff --git a/drivers/net/af_packet/meson.build b/drivers/net/af_packet/meson.build
new file mode 100644
index 00000000..92f6a971
--- /dev/null
+++ b/drivers/net/af_packet/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+sources = files('rte_eth_af_packet.c')
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index fa84eb9c..57eccfd0 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -1,43 +1,13 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014 John W. Linville <linville@tuxdriver.com>
- *
- * Originally based upon librte_pmd_pcap code:
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014 John W. Linville <linville@tuxdriver.com>
+ * Originally based upon librte_pmd_pcap code:
+ * Copyright(c) 2010-2015 Intel Corporation.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
*/
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
@@ -124,7 +94,7 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_SPEED_AUTONEG
+ .link_autoneg = ETH_LINK_AUTONEG
};
static uint16_t
diff --git a/drivers/net/ark/ark_ethdev_rx.h b/drivers/net/ark/ark_ethdev_rx.h
index 3a54a4c9..14678711 100644
--- a/drivers/net/ark/ark_ethdev_rx.h
+++ b/drivers/net/ark/ark_ethdev_rx.h
@@ -38,7 +38,7 @@
#include <rte_mbuf.h>
#include <rte_mempool.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
int eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
diff --git a/drivers/net/ark/ark_ethdev_tx.h b/drivers/net/ark/ark_ethdev_tx.h
index 8aaafc22..657f895a 100644
--- a/drivers/net/ark/ark_ethdev_tx.h
+++ b/drivers/net/ark/ark_ethdev_tx.h
@@ -36,7 +36,7 @@
#include <stdint.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
uint16_t eth_ark_xmit_pkts_noop(void *vtxq,
diff --git a/drivers/net/ark/ark_ext.h b/drivers/net/ark/ark_ext.h
index d26c8198..031cfddc 100644
--- a/drivers/net/ark/ark_ext.h
+++ b/drivers/net/ark/ark_ext.h
@@ -34,7 +34,7 @@
#ifndef _ARK_EXT_H_
#define _ARK_EXT_H_
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
/*
* This is the template file for users who which to define a dynamic
diff --git a/drivers/net/ark/ark_global.h b/drivers/net/ark/ark_global.h
index aef2cf73..41eb260e 100644
--- a/drivers/net/ark/ark_global.h
+++ b/drivers/net/ark/ark_global.h
@@ -38,7 +38,7 @@
#include <assert.h>
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
diff --git a/drivers/net/ark/ark_pktchkr.c b/drivers/net/ark/ark_pktchkr.c
index 202a1d9b..2cadaab8 100644
--- a/drivers/net/ark/ark_pktchkr.c
+++ b/drivers/net/ark/ark_pktchkr.c
@@ -36,7 +36,7 @@
#include <locale.h>
#include <unistd.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include "ark_pktchkr.h"
diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c
index 018f37b6..d3c3dee1 100644
--- a/drivers/net/ark/ark_pktgen.c
+++ b/drivers/net/ark/ark_pktgen.c
@@ -38,7 +38,7 @@
#include <rte_eal.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include "ark_pktgen.h"
diff --git a/drivers/net/avf/Makefile b/drivers/net/avf/Makefile
new file mode 100644
index 00000000..3f815bbc
--- /dev/null
+++ b/drivers/net/avf/Makefile
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_avf.a
+
+CFLAGS += -O3
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+
+# used to dump HW descriptor for debugging
+# CFLAGS += -DDEBUG_DUMP_DESC
+
+EXPORT_MAP := rte_pmd_avf_version.map
+
+LIBABIVER := 1
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings
+#
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS_BASE_DRIVER =
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+CFLAGS_BASE_DRIVER = -Wno-pointer-to-int-cast
+else
+CFLAGS_BASE_DRIVER = -Wno-pointer-to-int-cast
+
+endif
+OBJS_BASE_DRIVER=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_adminq.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_common.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_vchnl.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_rxtx.c
+ifeq ($(CONFIG_RTE_ARCH_X86), y)
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_INC_VECTOR) += avf_rxtx_vec_sse.c
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/avf/avf.h b/drivers/net/avf/avf.h
new file mode 100644
index 00000000..dcf8d1c7
--- /dev/null
+++ b/drivers/net/avf/avf.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_ETHDEV_H_
+#define _AVF_ETHDEV_H_
+
+#include <rte_kvargs.h>
+
+#define AVF_AQ_LEN 32
+#define AVF_AQ_BUF_SZ 4096
+#define AVF_RESET_WAIT_CNT 50
+#define AVF_BUF_SIZE_MIN 1024
+#define AVF_FRAME_SIZE_MAX 9728
+#define AVF_QUEUE_BASE_ADDR_UNIT 128
+
+#define AVF_MAX_NUM_QUEUES 16
+
+#define AVF_NUM_MACADDR_MAX 64
+
+#define AVF_DEFAULT_RX_PTHRESH 8
+#define AVF_DEFAULT_RX_HTHRESH 8
+#define AVF_DEFAULT_RX_WTHRESH 0
+
+#define AVF_DEFAULT_RX_FREE_THRESH 32
+
+#define AVF_DEFAULT_TX_PTHRESH 32
+#define AVF_DEFAULT_TX_HTHRESH 0
+#define AVF_DEFAULT_TX_WTHRESH 0
+
+#define AVF_DEFAULT_TX_FREE_THRESH 32
+#define AVF_DEFAULT_TX_RS_THRESH 32
+
+#define AVF_BASIC_OFFLOAD_CAPS ( \
+ VF_BASE_MODE_OFFLOADS | \
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | \
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+
+#define AVF_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP | \
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+
+#define AVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define AVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+/* Default queue interrupt throttling time in microseconds */
+#define AVF_ITR_INDEX_DEFAULT 0
+#define AVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define AVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+
+/* The overhead from MTU to max frame size.
+ * Considering QinQ packet, the VLAN tag needs to be counted twice.
+ */
+#define AVF_VLAN_TAG_SIZE 4
+#define AVF_ETH_OVERHEAD \
+ (ETHER_HDR_LEN + ETHER_CRC_LEN + AVF_VLAN_TAG_SIZE * 2)
+
+struct avf_adapter;
+struct avf_rx_queue;
+struct avf_tx_queue;
+
+/* Structure that defines a VSI, associated with an adapter. */
+struct avf_vsi {
+ struct avf_adapter *adapter; /* Backreference to associated adapter */
+ uint16_t vsi_id;
+ uint16_t nb_qps; /* Number of queue pairs VSI can occupy */
+ uint16_t nb_used_qps; /* Number of queue pairs VSI uses */
+ uint16_t max_macaddrs; /* Maximum number of MAC addresses */
+ uint16_t base_vector;
+ uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
+};
+
+/* TODO: is it correct to assume the max number is 16? */
+#define AVF_MAX_MSIX_VECTORS 16
+
+/* Structure to store private data specific to a VF instance. */
+struct avf_info {
+ uint16_t num_queue_pairs;
+ uint16_t max_pkt_len; /* Maximum packet length */
+ uint16_t mac_num; /* Number of MAC addresses */
+ bool promisc_unicast_enabled;
+ bool promisc_multicast_enabled;
+
+ struct virtchnl_version_info virtchnl_version;
+ struct virtchnl_vf_resource *vf_res; /* VF resource */
+ struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+
+ volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+ uint32_t cmd_retval; /* return value of the cmd response from PF */
+ uint8_t *aq_resp; /* buffer to store the adminq response from PF */
+
+ /* Event from pf */
+ bool dev_closed;
+ bool link_up;
+ enum virtchnl_link_speed link_speed;
+
+ struct avf_vsi vsi;
+ bool vf_reset;
+ uint64_t flags;
+
+ uint8_t *rss_lut;
+ uint8_t *rss_key;
+ uint16_t nb_msix; /* number of MSI-X interrupts on Rx */
+	uint16_t msix_base; /* msix vector base */
+ /* queue bitmask for each vector */
+ uint16_t rxq_map[AVF_MAX_MSIX_VECTORS];
+};
+
+#define AVF_MAX_PKT_TYPE 256
+
+/* Structure to store private data for each VF instance. */
+struct avf_adapter {
+ struct avf_hw hw;
+ struct rte_eth_dev *eth_dev;
+ struct avf_info vf;
+
+ bool rx_bulk_alloc_allowed;
+ /* For vector PMD */
+ bool rx_vec_allowed;
+ bool tx_vec_allowed;
+};
+
+/* AVF_DEV_PRIVATE_TO */
+#define AVF_DEV_PRIVATE_TO_ADAPTER(adapter) \
+ ((struct avf_adapter *)adapter)
+#define AVF_DEV_PRIVATE_TO_VF(adapter) \
+ (&((struct avf_adapter *)adapter)->vf)
+#define AVF_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct avf_adapter *)adapter)->hw)
+
+/* AVF_VSI_TO */
+#define AVF_VSI_TO_HW(vsi) \
+ (&(((struct avf_vsi *)vsi)->adapter->hw))
+#define AVF_VSI_TO_VF(vsi) \
+ (&(((struct avf_vsi *)vsi)->adapter->vf))
+#define AVF_VSI_TO_ETH_DEV(vsi) \
+ (((struct avf_vsi *)vsi)->adapter->eth_dev)
+
+static inline void
+avf_init_adminq_parameter(struct avf_hw *hw)
+{
+ hw->aq.num_arq_entries = AVF_AQ_LEN;
+ hw->aq.num_asq_entries = AVF_AQ_LEN;
+ hw->aq.arq_buf_size = AVF_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = AVF_AQ_BUF_SZ;
+}
+
+static inline uint16_t
+avf_calc_itr_interval(int16_t interval)
+{
+ if (interval < 0 || interval > AVF_QUEUE_ITR_INTERVAL_MAX)
+ interval = AVF_QUEUE_ITR_INTERVAL_DEFAULT;
+
+	/* Convert to hardware count; each unit written represents 2 us */
+ return interval / 2;
+}
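The conversion above halves the requested microsecond interval, since each unit written to the ITR register represents 2 us, and out-of-range requests are clamped to the default first. For instance (a hypothetical caller, using only the definitions above):

	/* 32 us default interval -> hardware count of 16 */
	uint16_t itr = avf_calc_itr_interval(AVF_QUEUE_ITR_INTERVAL_DEFAULT);
	/* negative or > 8160 us requests fall back to the default, also 16 */
	uint16_t clamped = avf_calc_itr_interval(-1);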
+
+/* structure used for sending and checking response of virtchnl ops */
+struct avf_cmd_info {
+ enum virtchnl_ops ops;
+ uint8_t *in_args; /* buffer for sending */
+ uint32_t in_args_size; /* buffer size for sending */
+ uint8_t *out_buffer; /* buffer for response */
+ uint32_t out_size; /* buffer size for response */
+};
+
+/* Clear the current command. Only call this after
+ * _atomic_set_cmd() has executed successfully.
+ */
+static inline void
+_clear_cmd(struct avf_info *vf)
+{
+ rte_wmb();
+ vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
+ vf->cmd_retval = VIRTCHNL_STATUS_SUCCESS;
+}
+
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_cmd(struct avf_info *vf, enum virtchnl_ops ops)
+{
+ int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+ if (!ret)
+ PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+ return !ret;
+}
+
+int avf_check_api_version(struct avf_adapter *adapter);
+int avf_get_vf_resource(struct avf_adapter *adapter);
+void avf_handle_virtchnl_msg(struct rte_eth_dev *dev);
+int avf_enable_vlan_strip(struct avf_adapter *adapter);
+int avf_disable_vlan_strip(struct avf_adapter *adapter);
+int avf_switch_queue(struct avf_adapter *adapter, uint16_t qid,
+ bool rx, bool on);
+int avf_enable_queues(struct avf_adapter *adapter);
+int avf_disable_queues(struct avf_adapter *adapter);
+int avf_configure_rss_lut(struct avf_adapter *adapter);
+int avf_configure_rss_key(struct avf_adapter *adapter);
+int avf_configure_queues(struct avf_adapter *adapter);
+int avf_config_irq_map(struct avf_adapter *adapter);
+void avf_add_del_all_mac_addr(struct avf_adapter *adapter, bool add);
+int avf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
+int avf_query_stats(struct avf_adapter *adapter,
+ struct virtchnl_eth_stats **pstats);
+int avf_config_promisc(struct avf_adapter *adapter, bool enable_unicast,
+ bool enable_multicast);
+int avf_add_del_eth_addr(struct avf_adapter *adapter,
+ struct ether_addr *addr, bool add);
+int avf_add_del_vlan(struct avf_adapter *adapter, uint16_t vlanid, bool add);
+#endif /* _AVF_ETHDEV_H_ */
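The _atomic_set_cmd()/_clear_cmd() pair above serializes virtchnl traffic: at most one command may be pending, and the response path resets pend_cmd when the PF answers. Below is a minimal sketch of a sender built on these helpers; the function name, the avf_aq_send_msg_to_pf() call and the polling budget are illustrative assumptions, not the PMD's actual avf_vchnl.c implementation:

	static int
	example_execute_vf_cmd(struct avf_adapter *adapter,
			       struct avf_cmd_info *args)
	{
		struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
		struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
		int i;

		/* refuse to overlap commands: pend_cmd must be OP_UNKNOWN */
		if (_atomic_set_cmd(vf, args->ops))
			return -EBUSY;

		/* hand the request to the PF over the admin queue */
		avf_aq_send_msg_to_pf(hw, args->ops, AVF_SUCCESS,
				      args->in_args, args->in_args_size, NULL);

		/* wait until the response handler calls _clear_cmd() */
		for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
			if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
				break;
			rte_delay_ms(10);
		}

		if (vf->pend_cmd != VIRTCHNL_OP_UNKNOWN) {
			_clear_cmd(vf); /* give up on a lost response */
			return -ETIMEDOUT;
		}

		return vf->cmd_retval == VIRTCHNL_STATUS_SUCCESS ? 0 : -EIO;
	}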
diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c
new file mode 100644
index 00000000..4df66170
--- /dev/null
+++ b/drivers/net/avf/avf_ethdev.c
@@ -0,0 +1,1430 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include <rte_interrupts.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_dev.h>
+
+#include "avf_log.h"
+#include "base/avf_prototype.h"
+#include "base/avf_adminq_cmd.h"
+#include "base/avf_type.h"
+
+#include "avf.h"
+#include "avf_rxtx.h"
+
+static int avf_dev_configure(struct rte_eth_dev *dev);
+static int avf_dev_start(struct rte_eth_dev *dev);
+static void avf_dev_stop(struct rte_eth_dev *dev);
+static void avf_dev_close(struct rte_eth_dev *dev);
+static void avf_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static const uint32_t *avf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static int avf_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void avf_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void avf_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void avf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void avf_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int avf_dev_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ uint32_t index,
+ uint32_t pool);
+static void avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int avf_dev_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vlan_id, int on);
+static int avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int avf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int avf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int avf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+static int avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int avf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+
+int avf_logtype_init;
+int avf_logtype_driver;
+
+static const struct rte_pci_id pci_id_avf_map[] = {
+ { RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct eth_dev_ops avf_eth_dev_ops = {
+ .dev_configure = avf_dev_configure,
+ .dev_start = avf_dev_start,
+ .dev_stop = avf_dev_stop,
+ .dev_close = avf_dev_close,
+ .dev_infos_get = avf_dev_info_get,
+ .dev_supported_ptypes_get = avf_dev_supported_ptypes_get,
+ .link_update = avf_dev_link_update,
+ .stats_get = avf_dev_stats_get,
+ .promiscuous_enable = avf_dev_promiscuous_enable,
+ .promiscuous_disable = avf_dev_promiscuous_disable,
+ .allmulticast_enable = avf_dev_allmulticast_enable,
+ .allmulticast_disable = avf_dev_allmulticast_disable,
+ .mac_addr_add = avf_dev_add_mac_addr,
+ .mac_addr_remove = avf_dev_del_mac_addr,
+ .vlan_filter_set = avf_dev_vlan_filter_set,
+ .vlan_offload_set = avf_dev_vlan_offload_set,
+ .rx_queue_start = avf_dev_rx_queue_start,
+ .rx_queue_stop = avf_dev_rx_queue_stop,
+ .tx_queue_start = avf_dev_tx_queue_start,
+ .tx_queue_stop = avf_dev_tx_queue_stop,
+ .rx_queue_setup = avf_dev_rx_queue_setup,
+ .rx_queue_release = avf_dev_rx_queue_release,
+ .tx_queue_setup = avf_dev_tx_queue_setup,
+ .tx_queue_release = avf_dev_tx_queue_release,
+ .mac_addr_set = avf_dev_set_default_mac_addr,
+ .reta_update = avf_dev_rss_reta_update,
+ .reta_query = avf_dev_rss_reta_query,
+ .rss_hash_update = avf_dev_rss_hash_update,
+ .rss_hash_conf_get = avf_dev_rss_hash_conf_get,
+ .rxq_info_get = avf_dev_rxq_info_get,
+ .txq_info_get = avf_dev_txq_info_get,
+ .rx_queue_count = avf_dev_rxq_count,
+ .rx_descriptor_status = avf_dev_rx_desc_status,
+ .tx_descriptor_status = avf_dev_tx_desc_status,
+ .mtu_set = avf_dev_mtu_set,
+ .rx_queue_intr_enable = avf_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = avf_dev_rx_queue_intr_disable,
+};
+
+static int
+avf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *ad =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(ad);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+ ad->rx_bulk_alloc_allowed = true;
+#ifdef RTE_LIBRTE_AVF_INC_VECTOR
+	/* Initialize to TRUE. If any Rx queue doesn't meet the
+	 * vector Rx/Tx preconditions, these flags will be reset.
+	 */
+ ad->rx_vec_allowed = true;
+ ad->tx_vec_allowed = true;
+#else
+ ad->rx_vec_allowed = false;
+ ad->tx_vec_allowed = false;
+#endif
+
+ /* Vlan stripping setting */
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ avf_enable_vlan_strip(ad);
+ else
+ avf_disable_vlan_strip(ad);
+ }
+ return 0;
+}
+
+static int
+avf_init_rss(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct rte_eth_rss_conf *rss_conf;
+ uint8_t i, j, nb_q;
+ int ret;
+
+ rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
+ AVF_MAX_NUM_QUEUES);
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
+ PMD_DRV_LOG(DEBUG, "RSS is not supported");
+ return -ENOTSUP;
+ }
+ if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
+ /* set all lut items to default queue */
+ for (i = 0; i < vf->vf_res->rss_lut_size; i++)
+ vf->rss_lut[i] = 0;
+ ret = avf_configure_rss_lut(adapter);
+ return ret;
+ }
+
+	/* In AVF, RSS enablement is controlled by the PF driver.
+	 * It cannot be configured from rss_conf->rss_hf.
+	 */
+
+ /* configure RSS key */
+ if (!rss_conf->rss_key) {
+ /* Calculate the default hash key */
+		for (i = 0; i < vf->vf_res->rss_key_size; i++)
+ vf->rss_key[i] = (uint8_t)rte_rand();
+ } else
+ rte_memcpy(vf->rss_key, rss_conf->rss_key,
+ RTE_MIN(rss_conf->rss_key_len,
+ vf->vf_res->rss_key_size));
+
+ /* init RSS LUT table */
+ for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
+ if (j >= nb_q)
+ j = 0;
+ vf->rss_lut[i] = j;
+ }
+	/* send virtchnl ops to configure RSS */
+ ret = avf_configure_rss_lut(adapter);
+ if (ret)
+ return ret;
+ ret = avf_configure_rss_key(adapter);
+ if (ret)
+ return ret;
+
+ return 0;
+}
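The LUT loop above spreads entries round-robin over the active queues; with nb_q = 3 and an 8-entry LUT, for example:

	/* rss_lut[] = { 0, 1, 2, 0, 1, 2, 0, 1 }:
	 * hash buckets 0..7 map to Rx queues 0..2 in rotation.
	 */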
+
+static int
+avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = dev->data;
+ uint16_t buf_size, max_pkt_len, len;
+
+ buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+
+ /* Calculate the maximum packet length allowed */
+ len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
+ max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ /* Check if the jumbo frame and maximum packet length are set
+ * correctly.
+ */
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (max_pkt_len <= ETHER_MAX_LEN ||
+ max_pkt_len > AVF_FRAME_SIZE_MAX) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is enabled",
+ (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)AVF_FRAME_SIZE_MAX);
+ return -EINVAL;
+ }
+ } else {
+ if (max_pkt_len < ETHER_MIN_LEN ||
+ max_pkt_len > ETHER_MAX_LEN) {
+ PMD_DRV_LOG(ERR, "maximum packet length must be "
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is disabled",
+ (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
+ return -EINVAL;
+ }
+ }
+
+ rxq->max_pkt_len = max_pkt_len;
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+ (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size) {
+ dev_data->scattered_rx = 1;
+ }
+ AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ AVF_WRITE_FLUSH(hw);
+
+ return 0;
+}
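To see when the scattered-Rx path is selected, assume a pool built with the conventional RTE_MBUF_DEFAULT_BUF_SIZE (2176 bytes of data room) and a jumbo configuration; the numbers are illustrative, not from the patch:

	/* buf_size = 2176 - 128 (RTE_PKTMBUF_HEADROOM) = 2048
	 * max_rx_pkt_len = 9000
	 * 9000 + 2 * AVF_VLAN_TAG_SIZE = 9008 > 2048  =>  scattered_rx = 1
	 */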
+
+static int
+avf_init_queues(struct rte_eth_dev *dev)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct avf_rx_queue **rxq =
+ (struct avf_rx_queue **)dev->data->rx_queues;
+ struct avf_tx_queue **txq =
+ (struct avf_tx_queue **)dev->data->tx_queues;
+ int i, ret = AVF_SUCCESS;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!rxq[i] || !rxq[i]->q_set)
+ continue;
+ ret = avf_init_rxq(dev, rxq[i]);
+ if (ret != AVF_SUCCESS)
+ break;
+ }
+ /* set rx/tx function to vector/scatter/single-segment
+ * according to parameters
+ */
+ avf_set_rx_function(dev);
+ avf_set_tx_function(dev);
+
+ return ret;
+}
+
+static int avf_config_rx_queues_irqs(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ uint16_t interval, i;
+ int vec;
+
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
+ if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (!intr_handle->intr_vec) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
+ dev->data->nb_rx_queues);
+ return -1;
+ }
+ }
+
+ if (!dev->data->dev_conf.intr_conf.rxq ||
+ !rte_intr_dp_is_en(intr_handle)) {
+		/* Rx interrupt disabled, map an interrupt only for write-back */
+ vf->nb_msix = 1;
+ if (vf->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+			/* If WB_ON_ITR is supported, enable it */
+ vf->msix_base = AVF_RX_VEC_START;
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
+ AVFINT_DYN_CTLN1_ITR_INDX_MASK |
+ AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+ } else {
+			/* Without the WB_ON_ITR offload flag, an interrupt
+			 * is needed for descriptor write-back.
+			 */
+ vf->msix_base = AVF_MISC_VEC_ID;
+
+ /* set ITR to max */
+ interval = avf_calc_itr_interval(
+ AVF_QUEUE_ITR_INTERVAL_MAX);
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
+ AVFINT_DYN_CTL01_INTENA_MASK |
+ (AVF_ITR_INDEX_DEFAULT <<
+ AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+ (interval <<
+ AVFINT_DYN_CTL01_INTERVAL_SHIFT));
+ }
+ AVF_WRITE_FLUSH(hw);
+ /* map all queues to the same interrupt */
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ vf->rxq_map[0] |= 1 << i;
+ } else {
+ if (!rte_intr_allow_others(intr_handle)) {
+ vf->nb_msix = 1;
+ vf->msix_base = AVF_MISC_VEC_ID;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vf->rxq_map[0] |= 1 << i;
+ intr_handle->intr_vec[i] = AVF_MISC_VEC_ID;
+ }
+ PMD_DRV_LOG(DEBUG,
+				    "vector 0 is mapped to all Rx queues");
+ } else {
+			/* If Rx interrupts are required and multiple
+			 * interrupts can be used, queue vectors start from 1.
+			 */
+ vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors,
+ intr_handle->nb_efd);
+ vf->msix_base = AVF_RX_VEC_START;
+ vec = AVF_RX_VEC_START;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vf->rxq_map[vec] |= 1 << i;
+ intr_handle->intr_vec[i] = vec++;
+ if (vec >= vf->nb_msix)
+ vec = AVF_RX_VEC_START;
+ }
+ PMD_DRV_LOG(DEBUG,
+				    "%u vectors are mapped to %u Rx queues",
+ vf->nb_msix, dev->data->nb_rx_queues);
+ }
+ }
+
+ if (avf_config_irq_map(adapter)) {
+ PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+avf_start_queues(struct rte_eth_dev *dev)
+{
+ struct avf_rx_queue *rxq;
+ struct avf_tx_queue *txq;
+ int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq->tx_deferred_start)
+ continue;
+ if (avf_dev_tx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq->rx_deferred_start)
+ continue;
+ if (avf_dev_rx_queue_start(dev, i) != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+avf_dev_start(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->adapter_stopped = 0;
+
+ vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+
+ if (avf_init_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "failed to do Queue init");
+ return -1;
+ }
+
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (avf_init_rss(adapter) != 0) {
+ PMD_DRV_LOG(ERR, "configure rss failed");
+ goto err_rss;
+ }
+ }
+
+ if (avf_configure_queues(adapter) != 0) {
+ PMD_DRV_LOG(ERR, "configure queues failed");
+ goto err_queue;
+ }
+
+ if (avf_config_rx_queues_irqs(dev, intr_handle) != 0) {
+ PMD_DRV_LOG(ERR, "configure irq failed");
+ goto err_queue;
+ }
+	/* Re-enable interrupts, because the efd assignment may have changed */
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ rte_intr_disable(intr_handle);
+ rte_intr_enable(intr_handle);
+ }
+
+ /* Set all mac addrs */
+ avf_add_del_all_mac_addr(adapter, TRUE);
+
+ if (avf_start_queues(dev) != 0) {
+ PMD_DRV_LOG(ERR, "enable queues failed");
+ goto err_mac;
+ }
+
+ return 0;
+
+err_mac:
+ avf_add_del_all_mac_addr(adapter, FALSE);
+err_queue:
+err_rss:
+ return -1;
+}
+
+static void
+avf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->adapter_stopped == 1)
+ return;
+
+ avf_stop_queues(dev);
+
+ /* Disable the interrupt for Rx */
+ rte_intr_efd_disable(intr_handle);
+ /* Rx interrupt vector mapping free */
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+
+ /* remove all mac addrs */
+ avf_add_del_all_mac_addr(adapter, FALSE);
+ hw->adapter_stopped = 1;
+}
+
+static void
+avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ memset(dev_info, 0, sizeof(*dev_info));
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
+ dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = vf->vf_res->rss_key_size;
+ dev_info->reta_size = vf->vf_res->rss_lut_size;
+ dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
+ dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = AVF_MAX_RING_DESC,
+ .nb_min = AVF_MIN_RING_DESC,
+ .nb_align = AVF_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = AVF_MAX_RING_DESC,
+ .nb_min = AVF_MIN_RING_DESC,
+ .nb_align = AVF_ALIGN_RING_DESC,
+ };
+}
+
+static const uint32_t *
+avf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+ return ptypes;
+}
+
+int
+avf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct rte_eth_link new_link;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	/* Only read the link status stored in the VF; it is updated
+	 * when a LINK_CHANGE event is received from the PF via virtchnl.
+	 */
+ switch (vf->link_speed) {
+ case VIRTCHNL_LINK_SPEED_100MB:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case VIRTCHNL_LINK_SPEED_1GB:
+ new_link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case VIRTCHNL_LINK_SPEED_10GB:
+ new_link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case VIRTCHNL_LINK_SPEED_20GB:
+ new_link.link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case VIRTCHNL_LINK_SPEED_25GB:
+ new_link.link_speed = ETH_SPEED_NUM_25G;
+ break;
+ case VIRTCHNL_LINK_SPEED_40GB:
+ new_link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+ default:
+ new_link.link_speed = ETH_SPEED_NUM_NONE;
+ break;
+ }
+
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vf->link_up ? ETH_LINK_UP :
+ ETH_LINK_DOWN;
+	new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+				  ETH_LINK_SPEED_FIXED);
+
+ if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
+ *(uint64_t *)&dev->data->dev_link,
+ *(uint64_t *)&new_link) == 0)
+ return -1;
+
+ return 0;
+}
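The cmpset at the end works because struct rte_eth_link is deliberately 8-byte aligned, so the whole record fits one atomic 64-bit store. Under the same assumption a reader can snapshot the link without locking; a sketch, not part of the PMD:

	struct rte_eth_link snapshot;

	/* a single 64-bit load yields a consistent speed/duplex/status tuple */
	*(uint64_t *)&snapshot =
		rte_atomic64_read((rte_atomic64_t *)&dev->data->dev_link);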
+
+static void
+avf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (vf->promisc_unicast_enabled)
+ return;
+
+ ret = avf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
+ if (!ret)
+ vf->promisc_unicast_enabled = TRUE;
+}
+
+static void
+avf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (!vf->promisc_unicast_enabled)
+ return;
+
+ ret = avf_config_promisc(adapter, FALSE, vf->promisc_multicast_enabled);
+ if (!ret)
+ vf->promisc_unicast_enabled = FALSE;
+}
+
+static void
+avf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (vf->promisc_multicast_enabled)
+ return;
+
+ ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
+ if (!ret)
+ vf->promisc_multicast_enabled = TRUE;
+}
+
+static void
+avf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int ret;
+
+ if (!vf->promisc_multicast_enabled)
+ return;
+
+ ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
+ if (!ret)
+ vf->promisc_multicast_enabled = FALSE;
+}
+
+static int
+avf_dev_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int err;
+
+ if (is_zero_ether_addr(addr)) {
+ PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
+ return -EINVAL;
+ }
+
+ err = avf_add_del_eth_addr(adapter, addr, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to add MAC address");
+ return -EIO;
+ }
+
+ vf->mac_num++;
+
+ return 0;
+}
+
+static void
+avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct ether_addr *addr;
+ int err;
+
+ addr = &dev->data->mac_addrs[index];
+
+ err = avf_add_del_eth_addr(adapter, addr, FALSE);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to delete MAC address");
+
+ vf->mac_num--;
+}
+
+static int
+avf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ int err;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ return -ENOTSUP;
+
+ err = avf_add_del_vlan(adapter, vlan_id, on);
+ if (err)
+ return -EIO;
+ return 0;
+}
+
+static int
+avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ int err;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ return -ENOTSUP;
+
+ /* Vlan stripping setting */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping */
+ if (dev_conf->rxmode.hw_vlan_strip)
+ err = avf_enable_vlan_strip(adapter);
+ else
+ err = avf_disable_vlan_strip(adapter);
+
+ if (err)
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+avf_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ uint8_t *lut;
+ uint16_t i, idx, shift;
+ int ret;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ if (reta_size != vf->vf_res->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
+			"table (%d) doesn't match the number the hardware "
+			"can support (%d)", reta_size, vf->vf_res->rss_lut_size);
+ return -EINVAL;
+ }
+
+ lut = rte_zmalloc("rss_lut", reta_size, 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+	/* keep a copy of the current table so a failed update can be reverted */
+	rte_memcpy(lut, vf->rss_lut, reta_size);
+
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_RETA_GROUP_SIZE;
+		shift = i % RTE_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift))
+			vf->rss_lut[i] = reta_conf[idx].reta[shift];
+	}
+
+	/* send virtchnl ops to configure RSS */
+	ret = avf_configure_rss_lut(adapter);
+	if (ret) /* revert to the previous table on failure */
+		rte_memcpy(vf->rss_lut, lut, reta_size);
+	rte_free(lut);
+
+ return ret;
+}
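Each rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64) LUT entries, which is what the idx/shift arithmetic unpacks. For example:

	/* LUT entry 70: idx = 70 / 64 = 1, shift = 70 % 64 = 6,
	 * so it is updated only when bit 6 of reta_conf[1].mask is set,
	 * taking its new queue from reta_conf[1].reta[6].
	 */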
+
+static int
+avf_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ uint16_t i, idx, shift;
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ if (reta_size != vf->vf_res->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash lookup "
+			"table (%d) doesn't match the number the hardware "
+			"can support (%d)", reta_size, vf->vf_res->rss_lut_size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = vf->rss_lut[i];
+ }
+
+ return 0;
+}
+
+static int
+avf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ /* HENA setting, it is enabled by default, no change */
+ if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
+ PMD_DRV_LOG(DEBUG, "No key to be configured");
+ return 0;
+ } else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
+		PMD_DRV_LOG(ERR, "The size of the configured hash key "
+			"(%d) doesn't match the size the hardware "
+			"can support (%d)", rss_conf->rss_key_len,
+			vf->vf_res->rss_key_size);
+ return -EINVAL;
+ }
+
+ rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);
+
+ return avf_configure_rss_key(adapter);
+}
+
+static int
+avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
+ return -ENOTSUP;
+
+ /* Just set it to default value now. */
+ rss_conf->rss_hf = AVF_RSS_OFFLOAD_ALL;
+
+ if (!rss_conf->rss_key)
+ return 0;
+
+ rss_conf->rss_key_len = vf->vf_res->rss_key_size;
+ rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);
+
+ return 0;
+}
+
+static int
+avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint32_t frame_size = mtu + AVF_ETH_OVERHEAD;
+ int ret = 0;
+
+ if (mtu < ETHER_MIN_MTU || frame_size > AVF_FRAME_SIZE_MAX)
+ return -EINVAL;
+
+ /* MTU setting is forbidden while the port is started */
+ if (dev->data->dev_started) {
+ PMD_DRV_LOG(ERR, "port must be stopped before configuration");
+ return -EBUSY;
+ }
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return ret;
+}
+
+static void
+avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct ether_addr *perm_addr, *old_addr;
+ int ret;
+
+ old_addr = (struct ether_addr *)hw->mac.addr;
+ perm_addr = (struct ether_addr *)hw->mac.perm_addr;
+
+ if (is_same_ether_addr(mac_addr, old_addr))
+ return;
+
+ /* If the MAC address is configured by host, skip the setting */
+ if (is_valid_assigned_ether_addr(perm_addr))
+ return;
+
+ ret = avf_add_del_eth_addr(adapter, old_addr, FALSE);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
+ " %02X:%02X:%02X:%02X:%02X:%02X",
+ old_addr->addr_bytes[0],
+ old_addr->addr_bytes[1],
+ old_addr->addr_bytes[2],
+ old_addr->addr_bytes[3],
+ old_addr->addr_bytes[4],
+ old_addr->addr_bytes[5]);
+
+ ret = avf_add_del_eth_addr(adapter, mac_addr, TRUE);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Fail to add new MAC:"
+ " %02X:%02X:%02X:%02X:%02X:%02X",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5]);
+
+ ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
+}
+
+static int
+avf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct virtchnl_eth_stats *pstats = NULL;
+ int ret;
+
+ ret = avf_query_stats(adapter, &pstats);
+ if (ret == 0) {
+ stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
+ pstats->rx_broadcast;
+ stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
+ pstats->tx_unicast;
+ stats->imissed = pstats->rx_discards;
+ stats->oerrors = pstats->tx_errors + pstats->tx_discards;
+ stats->ibytes = pstats->rx_bytes;
+ stats->obytes = pstats->tx_bytes;
+ } else {
+ PMD_DRV_LOG(ERR, "Get statistics failed");
+ }
+ return ret;
+}
+
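+/* A queue vector equal to AVF_MISC_VEC_ID is shared with the admin queue,
+ * so enabling it re-arms the control interrupt as well; dedicated queue
+ * vectors start at AVF_RX_VEC_START and use the AVFINT_DYN_CTLN1 registers.
+ */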
+static int
+avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ uint16_t msix_intr;
+
+ msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
+ if (msix_intr == AVF_MISC_VEC_ID) {
+ PMD_DRV_LOG(INFO, "MISC is also enabled for control");
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
+ AVFINT_DYN_CTL01_INTENA_MASK |
+ AVFINT_DYN_CTL01_ITR_INDX_MASK);
+ } else {
+ AVF_WRITE_REG(hw,
+ AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
+ AVFINT_DYN_CTLN1_INTENA_MASK |
+ AVFINT_DYN_CTLN1_ITR_INDX_MASK);
+ }
+
+ AVF_WRITE_FLUSH(hw);
+
+ rte_intr_enable(&pci_dev->intr_handle);
+
+ return 0;
+}
+
+static int
+avf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ uint16_t msix_intr;
+
+ msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
+ if (msix_intr == AVF_MISC_VEC_ID) {
+ PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
+ return -EIO;
+ }
+
+ AVF_WRITE_REG(hw,
+ AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
+ 0);
+
+ AVF_WRITE_FLUSH(hw);
+ return 0;
+}
+
+static int
+avf_check_vf_reset_done(struct avf_hw *hw)
+{
+ int i, reset;
+
+ for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
+ reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
+ AVFGEN_RSTAT_VFR_STATE_MASK;
+ reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
+ if (reset == VIRTCHNL_VFR_VFACTIVE ||
+ reset == VIRTCHNL_VFR_COMPLETED)
+ break;
+ rte_delay_ms(20);
+ }
+
+ if (i >= AVF_RESET_WAIT_CNT)
+ return -1;
+
+ return 0;
+}
+
+static int
+avf_init_vf(struct rte_eth_dev *dev)
+{
+ int err, bufsz;
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ err = avf_set_mac_type(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
+ goto err;
+ }
+
+ err = avf_check_vf_reset_done(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "VF is still resetting");
+ goto err;
+ }
+
+ avf_init_adminq_parameter(hw);
+ err = avf_init_adminq(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
+ goto err;
+ }
+
+ vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
+ if (!vf->aq_resp) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
+ goto err_aq;
+ }
+ if (avf_check_api_version(adapter) != 0) {
+ PMD_INIT_LOG(ERR, "check_api version failed");
+ goto err_api;
+ }
+
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
+ vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
+ if (!vf->vf_res) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
+ goto err_api;
+ }
+ if (avf_get_vf_resource(adapter) != 0) {
+ PMD_INIT_LOG(ERR, "avf_get_vf_config failed");
+ goto err_alloc;
+ }
+ /* Allocate memory for RSS info */
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vf->rss_key = rte_zmalloc("rss_key",
+ vf->vf_res->rss_key_size, 0);
+ if (!vf->rss_key) {
+ PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
+ goto err_rss;
+ }
+ vf->rss_lut = rte_zmalloc("rss_lut",
+ vf->vf_res->rss_lut_size, 0);
+ if (!vf->rss_lut) {
+ PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
+ goto err_rss;
+ }
+ }
+ return 0;
+err_rss:
+ rte_free(vf->rss_key);
+ rte_free(vf->rss_lut);
+err_alloc:
+ rte_free(vf->vf_res);
+ vf->vsi_res = NULL;
+err_api:
+ rte_free(vf->aq_resp);
+err_aq:
+ avf_shutdown_adminq(hw);
+err:
+ return -1;
+}
+
+/* Enable default admin queue interrupt setting */
+static inline void
+avf_enable_irq0(struct avf_hw *hw)
+{
+ /* Enable admin queue interrupt trigger */
+ AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);
+
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
+ AVFINT_DYN_CTL01_ITR_INDX_MASK);
+
+ AVF_WRITE_FLUSH(hw);
+}
+
+static inline void
+avf_disable_irq0(struct avf_hw *hw)
+{
+ /* Disable all interrupt types */
+ AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
+ AVFINT_DYN_CTL01_ITR_INDX_MASK);
+ AVF_WRITE_FLUSH(hw);
+}
+
+static void
+avf_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ avf_disable_irq0(hw);
+
+ avf_handle_virtchnl_msg(dev);
+
+ avf_enable_irq0(hw);
+}
+
+static int
+avf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* assign ops func pointer */
+ eth_dev->dev_ops = &avf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &avf_recv_pkts;
+ eth_dev->tx_pkt_burst = &avf_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &avf_prep_pkts;
+
+ /* For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check if we need a different RX
+ * and TX function.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ avf_set_rx_function(eth_dev);
+ avf_set_tx_function(eth_dev);
+ return 0;
+ }
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.bus_id = pci_dev->addr.bus;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+ adapter->eth_dev = eth_dev;
+
+ if (avf_init_vf(eth_dev) != 0) {
+ PMD_INIT_LOG(ERR, "Init vf failed");
+ return -1;
+ }
+
+ /* copy mac addr */
+ eth_dev->data->mac_addrs = rte_zmalloc(
+ "avf_mac",
+ ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
+ 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
+ " store MAC addresses",
+ ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
+ return -ENOMEM;
+ }
+ /* If the MAC address is not configured by host,
+ * generate a random one.
+ */
+ if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
+ eth_random_addr(hw->mac.addr);
+ ether_addr_copy((struct ether_addr *)hw->mac.addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ /* register callback func to eal lib */
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ avf_dev_interrupt_handler,
+ (void *)eth_dev);
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(&pci_dev->intr_handle);
+
+ /* configure and enable device interrupt */
+ avf_enable_irq0(hw);
+
+ return 0;
+}
+
+static void
+avf_dev_close(struct rte_eth_dev *dev)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ avf_dev_stop(dev);
+ avf_shutdown_adminq(hw);
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+
+ /* unregister callback func from eal lib */
+ rte_intr_callback_unregister(intr_handle,
+ avf_dev_interrupt_handler, dev);
+ avf_disable_irq0(hw);
+}
+
+static int
+avf_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+ if (hw->adapter_stopped == 0)
+ avf_dev_close(dev);
+
+ rte_free(vf->vf_res);
+ vf->vsi_res = NULL;
+ vf->vf_res = NULL;
+
+ rte_free(vf->aq_resp);
+ vf->aq_resp = NULL;
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ if (vf->rss_lut) {
+ rte_free(vf->rss_lut);
+ vf->rss_lut = NULL;
+ }
+ if (vf->rss_key) {
+ rte_free(vf->rss_key);
+ vf->rss_key = NULL;
+ }
+
+ return 0;
+}
+
+static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct avf_adapter), avf_dev_init);
+}
+
+static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
+}
+
+/* Adaptive virtual function driver struct */
+static struct rte_pci_driver rte_avf_pmd = {
+ .id_table = pci_id_avf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_avf_pci_probe,
+ .remove = eth_avf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");
+RTE_INIT(avf_init_log);
+static void
+avf_init_log(void)
+{
+ avf_logtype_init = rte_log_register("pmd.net.avf.init");
+ if (avf_logtype_init >= 0)
+ rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
+ avf_logtype_driver = rte_log_register("pmd.net.avf.driver");
+ if (avf_logtype_driver >= 0)
+ rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
+}
+
+/* memory func for base code */
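+/* Each allocation reserves a uniquely named memzone (rte_rand() keeps the
+ * names from colliding) bounded to a 2M page, so the buffer cannot cross
+ * a 2M boundary.
+ */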
+enum avf_status_code
+avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_dma_mem *mem,
+ u64 size,
+ u32 alignment)
+{
+ const struct rte_memzone *mz = NULL;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
+ alignment, RTE_PGSIZE_2M);
+ if (!mz)
+ return AVF_ERR_NO_MEMORY;
+
+ mem->size = size;
+ mem->va = mz->addr;
+ mem->pa = mz->iova;
+ mem->zone = (const void *)mz;
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s allocated with physical address: %"PRIu64,
+ mz->name, mem->pa);
+
+ return AVF_SUCCESS;
+}
+
+enum avf_status_code
+avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_dma_mem *mem)
+{
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s to be freed with physical address: %"PRIu64,
+ ((const struct rte_memzone *)mem->zone)->name, mem->pa);
+ rte_memzone_free((const struct rte_memzone *)mem->zone);
+ mem->zone = NULL;
+ mem->va = NULL;
+ mem->pa = (u64)0;
+
+ return AVF_SUCCESS;
+}
+
+enum avf_status_code
+avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_virt_mem *mem,
+ u32 size)
+{
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = rte_zmalloc("avf", size, 0);
+
+ if (mem->va)
+ return AVF_SUCCESS;
+ else
+ return AVF_ERR_NO_MEMORY;
+}
+
+enum avf_status_code
+avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_virt_mem *mem)
+{
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ rte_free(mem->va);
+ mem->va = NULL;
+
+ return AVF_SUCCESS;
+}
diff --git a/drivers/net/avf/avf_log.h b/drivers/net/avf/avf_log.h
new file mode 100644
index 00000000..8d574d3f
--- /dev/null
+++ b/drivers/net/avf/avf_log.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_LOG_H_
+#define _AVF_LOG_H_
+
+extern int avf_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, avf_logtype_init, "%s(): " fmt "\n", \
+ __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int avf_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, avf_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_AVF_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_AVF_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_AVF_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#endif /* _AVF_LOG_H_ */
diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
new file mode 100644
index 00000000..d276d975
--- /dev/null
+++ b/drivers/net/avf/avf_rxtx.c
@@ -0,0 +1,1959 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_net.h>
+
+#include "avf_log.h"
+#include "base/avf_prototype.h"
+#include "base/avf_type.h"
+#include "avf.h"
+#include "avf_rxtx.h"
+
+static inline int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+ /* The following constraints must be satisfied:
+ * thresh < rxq->nb_rx_desc
+ */
+ if (thresh >= nb_desc) {
+ PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
+ thresh, nb_desc);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int
+check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
+ uint16_t tx_free_thresh)
+{
+ /* TX descriptors will have their RS bit set after tx_rs_thresh
+ * descriptors have been used. The TX descriptor ring will be cleaned
+ * after tx_free_thresh descriptors are used or if the number of
+ * descriptors required to transmit a packet is greater than the
+ * number of free TX descriptors.
+ *
+ * The following constraints must be satisfied:
+ * - tx_rs_thresh must be less than the size of the ring minus 2.
+ * - tx_free_thresh must be less than the size of the ring minus 3.
+ * - tx_rs_thresh must be less than or equal to tx_free_thresh.
+ * - tx_rs_thresh must be a divisor of the ring size.
+ *
+ * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+ * race condition, hence the maximum threshold constraints. When set
+ * to zero use default values.
+ */
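+ /* Example of a valid combination: nb_desc = 1024, tx_rs_thresh = 32,
+ * tx_free_thresh = 64: 32 < 1022, 64 < 1021, 32 <= 64 and
+ * 1024 % 32 == 0.
+ */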
+ if (tx_rs_thresh >= (nb_desc - 2)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
+ "number of TX descriptors (%u) minus 2",
+ tx_rs_thresh, nb_desc);
+ return -EINVAL;
+ }
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
+ "number of TX descriptors (%u) minus 3.",
+ tx_free_thresh, nb_desc);
+ return -EINVAL;
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
+ "equal to tx_free_thresh (%u).",
+ tx_rs_thresh, tx_free_thresh);
+ return -EINVAL;
+ }
+ if ((nb_desc % tx_rs_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
+ "number of TX descriptors (%u).",
+ tx_rs_thresh, nb_desc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef RTE_LIBRTE_AVF_INC_VECTOR
+static inline bool
+check_rx_vec_allow(struct avf_rx_queue *rxq)
+{
+ if (rxq->rx_free_thresh >= AVF_VPMD_RX_MAX_BURST &&
+ rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
+ PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
+ return TRUE;
+ }
+
+ PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
+ return FALSE;
+}
+
+static inline bool
+check_tx_vec_allow(struct avf_tx_queue *txq)
+{
+ if ((txq->txq_flags & AVF_SIMPLE_FLAGS) == AVF_SIMPLE_FLAGS &&
+ txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST &&
+ txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF) {
+ PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
+ return TRUE;
+ }
+ PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
+ return FALSE;
+}
+#endif
+
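+/* Bulk allocation requires rx_free_thresh to be at least AVF_RX_MAX_BURST
+ * (assumed to be 32 here, as in similar Intel PMDs) and to divide the ring
+ * size evenly, e.g. nb_rx_desc = 512 with rx_free_thresh = 32.
+ */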
+static inline bool
+check_rx_bulk_allow(struct avf_rx_queue *rxq)
+{
+ int ret = TRUE;
+
+ if (!(rxq->rx_free_thresh >= AVF_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "AVF_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, AVF_RX_MAX_BURST);
+ ret = FALSE;
+ } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
+ ret = FALSE;
+ }
+ return ret;
+}
+
+static inline void
+reset_rx_queue(struct avf_rx_queue *rxq)
+{
+ uint32_t i; /* wide enough for len * sizeof(desc), which can exceed 64K */
+ uint16_t len;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc + AVF_RX_MAX_BURST;
+
+ for (i = 0; i < len * sizeof(union avf_rx_desc); i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+ for (i = 0; i < AVF_RX_MAX_BURST; i++)
+ rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+ /* for rx bulk */
+ rxq->rx_nb_avail = 0;
+ rxq->rx_next_avail = 0;
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+}
+
+static inline void
+reset_tx_queue(struct avf_tx_queue *txq)
+{
+ struct avf_tx_entry *txe;
+ uint32_t i, size; /* the byte count can exceed 64K for large rings */
+ uint16_t prev;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ txe = txq->sw_ring;
+ size = sizeof(struct avf_tx_desc) * txq->nb_tx_desc;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->tx_ring)[i] = 0;
+
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i].cmd_type_offset_bsz =
+ rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_tail = 0;
+ txq->nb_used = 0;
+
+ txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+ txq->nb_free = txq->nb_tx_desc - 1;
+
+ txq->next_dd = txq->rs_thresh - 1;
+ txq->next_rs = txq->rs_thresh - 1;
+}
+
+static int
+alloc_rxq_mbufs(struct avf_rx_queue *rxq)
+{
+ volatile union avf_rx_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &rxq->rx_ring[i];
+ rxd->read.pkt_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
+#ifndef RTE_LIBRTE_AVF_16BYTE_RX_DESC
+ rxd->read.rsvd1 = 0;
+ rxd->read.rsvd2 = 0;
+#endif
+
+ rxq->sw_ring[i] = mbuf;
+ }
+
+ return 0;
+}
+
+static inline void
+release_rxq_mbufs(struct avf_rx_queue *rxq)
+{
+ struct rte_mbuf *mbuf;
+ uint16_t i;
+
+ if (!rxq->sw_ring)
+ return;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i]) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ rxq->sw_ring[i] = NULL;
+ }
+ }
+
+ /* for rx bulk */
+ if (rxq->rx_nb_avail == 0)
+ return;
+ for (i = 0; i < rxq->rx_nb_avail; i++) {
+ mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
+ rte_pktmbuf_free_seg(mbuf);
+ }
+ rxq->rx_nb_avail = 0;
+}
+
+static inline void
+release_txq_mbufs(struct avf_tx_queue *txq)
+{
+ uint16_t i;
+
+ if (!txq || !txq->sw_ring) {
+ PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+}
+
+static const struct avf_rxq_ops def_rxq_ops = {
+ .release_mbufs = release_rxq_mbufs,
+};
+
+static const struct avf_txq_ops def_txq_ops = {
+ .release_mbufs = release_txq_mbufs,
+};
+
+int
+avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_adapter *ad =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_rx_queue *rxq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t len;
+ uint16_t rx_free_thresh;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
+ nb_desc > AVF_MAX_RING_DESC ||
+ nb_desc < AVF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
+ "invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Check free threshold */
+ rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+ AVF_DEFAULT_RX_FREE_THRESH :
+ rx_conf->rx_free_thresh;
+ if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
+ return -EINVAL;
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ avf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("avf rxq",
+ sizeof(struct avf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for "
+ "rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq->mp = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->crc_len = 0; /* crc stripping by default */
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->rx_hdr_len = 0;
+
+ len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+ rxq->rx_buf_len = RTE_ALIGN(len, (1 << AVF_RXQ_CTX_DBUFF_SHIFT));
+
+ /* Allocate the software ring. */
+ len = nb_desc + AVF_RX_MAX_BURST;
+ rxq->sw_ring =
+ rte_zmalloc_socket("avf rx sw ring",
+ sizeof(struct rte_mbuf *) * len,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+ /* Allocate the maximum number of RX ring hardware descriptors plus
+ * a little more to support bulk allocation.
+ */
+ len = AVF_MAX_RING_DESC + AVF_RX_MAX_BURST;
+ ring_size = RTE_ALIGN(len * sizeof(union avf_rx_desc),
+ AVF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ ring_size, AVF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+ /* Zero all the descriptors in the ring. */
+ memset(mz->addr, 0, ring_size);
+ rxq->rx_ring_phys_addr = mz->iova;
+ rxq->rx_ring = (union avf_rx_desc *)mz->addr;
+
+ rxq->mz = mz;
+ reset_rx_queue(rxq);
+ rxq->q_set = TRUE;
+ dev->data->rx_queues[queue_idx] = rxq;
+ rxq->qrx_tail = hw->hw_addr + AVF_QRX_TAIL1(rxq->queue_id);
+ rxq->ops = &def_rxq_ops;
+
+ if (check_rx_bulk_allow(rxq) == TRUE) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "not satisfied, Scattered Rx is requested "
+ "on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
+ }
+
+#ifdef RTE_LIBRTE_AVF_INC_VECTOR
+ if (check_rx_vec_allow(rxq) == FALSE)
+ ad->rx_vec_allowed = false;
+#endif
+ return 0;
+}
+
+int
+avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_adapter *ad =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_tx_queue *txq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
+ nb_desc > AVF_MAX_RING_DESC ||
+ nb_desc < AVF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
+ "invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
+ return -EINVAL;
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ avf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("avf txq",
+ sizeof(struct avf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->rs_thresh = tx_rs_thresh;
+ txq->free_thresh = tx_free_thresh;
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->txq_flags = tx_conf->txq_flags;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+ /* Allocate software ring */
+ txq->sw_ring =
+ rte_zmalloc_socket("avf tx sw ring",
+ sizeof(struct avf_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct avf_tx_desc) * AVF_MAX_RING_DESC;
+ ring_size = RTE_ALIGN(ring_size, AVF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, AVF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->tx_ring = (struct avf_tx_desc *)mz->addr;
+
+ txq->mz = mz;
+ reset_tx_queue(txq);
+ txq->q_set = TRUE;
+ dev->data->tx_queues[queue_idx] = txq;
+ txq->qtx_tail = hw->hw_addr + AVF_QTX_TAIL1(queue_idx);
+ txq->ops = &def_txq_ops;
+
+#ifdef RTE_LIBRTE_AVF_INC_VECTOR
+ if (check_tx_vec_allow(txq) == FALSE)
+ ad->tx_vec_allowed = false;
+#endif
+
+ return 0;
+}
+
+int
+avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_rx_queue *rxq;
+ int err = 0;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ err = alloc_rxq_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ AVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = avf_switch_queue(adapter, rx_queue_id, TRUE, TRUE);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ else
+ dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+
+ return err;
+}
+
+int
+avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_tx_queue *txq;
+ int err = 0;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /* Init the TX tail register. */
+ AVF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ AVF_WRITE_FLUSH(hw);
+
+ /* Ready to switch the queue on */
+ err = avf_switch_queue(adapter, tx_queue_id, FALSE, TRUE);
+
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ else
+ dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+
+ return err;
+}
+
+int
+avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_rx_queue *rxq;
+ int err;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ err = avf_switch_queue(adapter, rx_queue_id, TRUE, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxq->ops->release_mbufs(rxq);
+ reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+int
+avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_tx_queue *txq;
+ int err;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ err = avf_switch_queue(adapter, tx_queue_id, FALSE, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
+ }
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ txq->ops->release_mbufs(txq);
+ reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+void
+avf_dev_rx_queue_release(void *rxq)
+{
+ struct avf_rx_queue *q = (struct avf_rx_queue *)rxq;
+
+ if (!q)
+ return;
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(q);
+}
+
+void
+avf_dev_tx_queue_release(void *txq)
+{
+ struct avf_tx_queue *q = (struct avf_tx_queue *)txq;
+
+ if (!q)
+ return;
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(q);
+}
+
+void
+avf_stop_queues(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_rx_queue *rxq;
+ struct avf_tx_queue *txq;
+ int ret, i;
+
+ /* Stop All queues */
+ ret = avf_disable_queues(adapter);
+ if (ret)
+ PMD_DRV_LOG(WARNING, "Fail to stop queues");
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+ txq->ops->release_mbufs(txq);
+ reset_tx_queue(txq);
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq)
+ continue;
+ rxq->ops->release_mbufs(rxq);
+ reset_rx_queue(rxq);
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+}
+
+static inline void
+avf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union avf_rx_desc *rxdp)
+{
+ if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ (1 << AVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+ mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mb->vlan_tci =
+ rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
+ } else {
+ mb->vlan_tci = 0;
+ }
+}
+
+/* Translate the rx descriptor status and error fields to pkt flags */
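+/* Only when the FLTSTAT field equals AVF_RX_DESC_FLTSTAT_RSS_HASH does
+ * qword0 carry a valid RSS hash, so PKT_RX_RSS_HASH gates copying the
+ * hash into the mbuf in the receive paths.
+ */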
+static inline uint64_t
+avf_rxd_to_pkt_flags(uint64_t qword)
+{
+ uint64_t flags;
+ uint64_t error_bits = (qword >> AVF_RXD_QW1_ERROR_SHIFT);
+
+#define AVF_RX_ERR_BITS 0x3f
+
+ /* Check if RSS_HASH */
+ flags = (((qword >> AVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+ AVF_RX_DESC_FLTSTAT_RSS_HASH) ==
+ AVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
+
+ if (likely((error_bits & AVF_RX_ERR_BITS) == 0)) {
+ flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+ return flags;
+ }
+
+ if (unlikely(error_bits & (1 << AVF_RX_DESC_ERROR_IPE_SHIFT)))
+ flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (unlikely(error_bits & (1 << AVF_RX_DESC_ERROR_L4E_SHIFT)))
+ flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ /* TODO: Oversize error bit is not processed here */
+
+ return flags;
+}
+
+/* implement recv_pkts */
+uint16_t
+avf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ volatile union avf_rx_desc *rx_ring;
+ volatile union avf_rx_desc *rxdp;
+ struct avf_rx_queue *rxq;
+ union avf_rx_desc rxd;
+ struct rte_mbuf *rxe;
+ struct rte_eth_dev *dev;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ uint16_t nb_rx;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint16_t rx_packet_len;
+ uint16_t rx_id, nb_hold;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+ static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ /* [2] - [21] reserved */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ /* All others reserved */
+ };
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
+ AVF_RXD_QW1_STATUS_SHIFT;
+
+ /* Check the DD bit first */
+ if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+ AVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ dev = &rte_eth_devices[rxq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", rxq->port_id, rxq->queue_id);
+ break;
+ }
+
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = rxq->sw_ring[rx_id];
+ /* refill this ring slot with the newly allocated mbuf */
+ rxq->sw_ring[rx_id] = nmb;
+ rx_id++;
+ if (unlikely(rx_id == rxq->nb_rx_desc))
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+
+ /* When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+ }
+ rxm = rxe;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+
+ rx_packet_len = ((qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ AVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = rx_packet_len;
+ rxm->data_len = rx_packet_len;
+ rxm->port = rxq->port_id;
+ rxm->ol_flags = 0;
+ avf_rxd_to_vlan_tci(rxm, &rxd);
+ pkt_flags = avf_rxd_to_pkt_flags(qword1);
+ rxm->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ AVF_RXD_QW1_PTYPE_MASK) >> AVF_RXD_QW1_PTYPE_SHIFT)];
+
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ rxm->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+
+ rxm->ol_flags |= pkt_flags;
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /* If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the receive tail register of queue.
+ * Update that register with the value of the last processed RX
+ * descriptor minus 1.
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ rxq->port_id, rxq->queue_id,
+ rx_id, nb_hold, nb_rx);
+ rx_id = (uint16_t)((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
+/* implement recv_scattered_pkts */
+uint16_t
+avf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avf_rx_queue *rxq = rx_queue;
+ union avf_rx_desc rxd;
+ struct rte_mbuf *rxe;
+ struct rte_mbuf *first_seg = rxq->pkt_first_seg;
+ struct rte_mbuf *last_seg = rxq->pkt_last_seg;
+ struct rte_mbuf *nmb, *rxm;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
+ uint32_t rx_status;
+ uint64_t qword1;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+
+ volatile union avf_rx_desc *rx_ring = rxq->rx_ring;
+ volatile union avf_rx_desc *rxdp;
+ static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ /* [2] - [21] reserved */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ /* All others reserved */
+ };
+
+ while (nb_rx < nb_pkts) {
+ rxdp = &rx_ring[rx_id];
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
+ AVF_RXD_QW1_STATUS_SHIFT;
+
+ /* Check the DD bit */
+ if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+ AVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", rxq->port_id, rxq->queue_id);
+ dev = &rte_eth_devices[rxq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ rxd = *rxdp;
+ nb_hold++;
+ rxe = rxq->sw_ring[rx_id];
+ /* refill this ring slot with the newly allocated mbuf */
+ rxq->sw_ring[rx_id] = nmb;
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+
+ /* When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+ }
+
+ rxm = rxe;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
+ /* Set data buffer address and data length of the mbuf */
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+ rx_packet_len = (qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ AVF_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rxm->data_len = rx_packet_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /* If this is the first buffer of the received packet, set the
+ * pointer to the first mbuf of the packet and initialize its
+ * context. Otherwise, update the total length and the number
+ * of segments of the current scattered packet, and update the
+ * pointer to the last mbuf of the current packet.
+ */
+ if (!first_seg) {
+ first_seg = rxm;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = rx_packet_len;
+ } else {
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
+ rx_packet_len);
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /* If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (!(rx_status & (1 << AVF_RX_DESC_STATUS_EOF_SHIFT))) {
+ last_seg = rxm;
+ continue;
+ }
+
+ /* This is the last buffer of the received packet. If the CRC
+ * is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer. If part
+ * of the CRC is also contained in the previous mbuf, subtract
+ * the length of that CRC part from the data length of the
+ * previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= ETHER_CRC_LEN;
+ if (rx_packet_len <= ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len =
+ (uint16_t)(last_seg->data_len -
+ (ETHER_CRC_LEN - rx_packet_len));
+ last_seg->next = NULL;
+ } else
+ rxm->data_len = (uint16_t)(rx_packet_len -
+ ETHER_CRC_LEN);
+ }
+
+ first_seg->port = rxq->port_id;
+ first_seg->ol_flags = 0;
+ avf_rxd_to_vlan_tci(first_seg, &rxd);
+ pkt_flags = avf_rxd_to_pkt_flags(qword1);
+ first_seg->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ AVF_RXD_QW1_PTYPE_MASK) >> AVF_RXD_QW1_PTYPE_SHIFT)];
+
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ first_seg->hash.rss =
+ rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+
+ first_seg->ol_flags |= pkt_flags;
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+ first_seg->data_off));
+ rx_pkts[nb_rx++] = first_seg;
+ first_seg = NULL;
+ }
+
+ /* Record index of the next RX descriptor to probe. */
+ rxq->rx_tail = rx_id;
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ /* If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register. Update the RDT with the value of the last processed RX
+ * descriptor minus 1, to guarantee that the RDT register is never
+ * equal to the RDH register, which creates a "full" ring situation
+ * from the hardware point of view.
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ rxq->port_id, rxq->queue_id,
+ rx_id, nb_hold, nb_rx);
+ rx_id = (uint16_t)(rx_id == 0 ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
+#define AVF_LOOK_AHEAD 8
+static inline int
+avf_rx_scan_hw_ring(struct avf_rx_queue *rxq)
+{
+ volatile union avf_rx_desc *rxdp;
+ struct rte_mbuf **rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ uint64_t qword1;
+ uint32_t rx_status;
+ int32_t s[AVF_LOOK_AHEAD], nb_dd;
+ int32_t i, j, nb_rx = 0;
+ uint64_t pkt_flags;
+ static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ /* [2] - [21] reserved */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ /* All others reserved */
+ };
+
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
+ rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
+ AVF_RXD_QW1_STATUS_SHIFT;
+
+ /* Make sure there is at least 1 packet to receive */
+ if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* Scan LOOK_AHEAD descriptors at a time to determine which
+ * descriptors reference packets that are ready to be received.
+ */
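+ /* E.g. assuming AVF_RX_MAX_BURST is 32, at most four groups of
+ * AVF_LOOK_AHEAD (8) descriptors are examined per call; a group with
+ * fewer than 8 DD bits set ends the scan.
+ */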
+ for (i = 0; i < AVF_RX_MAX_BURST; i += AVF_LOOK_AHEAD,
+ rxdp += AVF_LOOK_AHEAD, rxep += AVF_LOOK_AHEAD) {
+ /* Read desc statuses backwards to avoid race condition */
+ for (j = AVF_LOOK_AHEAD - 1; j >= 0; j--) {
+ qword1 = rte_le_to_cpu_64(
+ rxdp[j].wb.qword1.status_error_len);
+ s[j] = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
+ AVF_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rte_smp_rmb();
+
+ /* Compute how many status bits were set */
+ for (j = 0, nb_dd = 0; j < AVF_LOOK_AHEAD; j++)
+ nb_dd += s[j] & (1 << AVF_RX_DESC_STATUS_DD_SHIFT);
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf parameters */
+ for (j = 0; j < nb_dd; j++) {
+ AVF_DUMP_RX_DESC(rxq, &rxdp[j],
+ rxq->rx_tail + i * AVF_LOOK_AHEAD + j);
+
+ mb = rxep[j];
+ qword1 = rte_le_to_cpu_64
+ (rxdp[j].wb.qword1.status_error_len);
+ pkt_len = ((qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ AVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->ol_flags = 0;
+ avf_rxd_to_vlan_tci(mb, &rxdp[j]);
+ pkt_flags = avf_rxd_to_pkt_flags(qword1);
+ mb->packet_type =
+ ptype_tbl[(uint8_t)((qword1 &
+ AVF_RXD_QW1_PTYPE_MASK) >>
+ AVF_RXD_QW1_PTYPE_SHIFT)];
+
+ if (pkt_flags & PKT_RX_RSS_HASH)
+ mb->hash.rss = rte_le_to_cpu_32(
+ rxdp[j].wb.qword0.hi_dword.rss);
+
+ mb->ol_flags |= pkt_flags;
+ }
+
+ for (j = 0; j < AVF_LOOK_AHEAD; j++)
+ rxq->rx_stage[i + j] = rxep[j];
+
+ if (nb_dd != AVF_LOOK_AHEAD)
+ break;
+ }
+
+ /* Clear software ring entries */
+ for (i = 0; i < nb_rx; i++)
+ rxq->sw_ring[rxq->rx_tail + i] = NULL;
+
+ return nb_rx;
+}
+
+static inline uint16_t
+avf_rx_fill_from_stage(struct avf_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t i;
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ for (i = 0; i < nb_pkts; i++)
+ rx_pkts[i] = stage[i];
+
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+ return nb_pkts;
+}
+
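+/* Refill rx_free_thresh buffers starting right after the previous trigger;
+ * e.g. with rx_free_thresh = 32 and rx_free_trigger = 63 the refill covers
+ * sw_ring[32..63] and the next trigger becomes 95.
+ */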
+static inline int
+avf_rx_alloc_bufs(struct avf_rx_queue *rxq)
+{
+ volatile union avf_rx_desc *rxdp;
+ struct rte_mbuf **rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx, i;
+ uint64_t dma_addr;
+ int diag;
+
+ /* Allocate buffers in bulk */
+ alloc_idx = (uint16_t)(rxq->rx_free_trigger -
+ (rxq->rx_free_thresh - 1));
+ rxep = &rxq->sw_ring[alloc_idx];
+ diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0)) {
+ PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
+ return -ENOMEM;
+ }
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; i++) {
+ if (likely(i < (rxq->rx_free_thresh - 1)))
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxep[i + 1]);
+
+ mb = rxep[i];
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->next = NULL;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ mb->nb_segs = 1;
+ mb->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
+ rxdp[i].read.hdr_addr = 0;
+ rxdp[i].read.pkt_addr = dma_addr;
+ }
+
+ /* Update rx tail register */
+ rte_wmb();
+ AVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+
+ rxq->rx_free_trigger =
+ (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+
+ return 0;
+}
+
+static inline uint16_t
+rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct avf_rx_queue *rxq = (struct avf_rx_queue *)rx_queue;
+ uint16_t nb_rx = 0;
+
+ if (!nb_pkts)
+ return 0;
+
+ if (rxq->rx_nb_avail)
+ return avf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ nb_rx = (uint16_t)avf_rx_scan_hw_ring(rxq);
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ if (avf_rx_alloc_bufs(rxq) != 0) {
+ uint16_t i, j;
+
+ /* TODO: count rx_mbuf_alloc_failed here */
+
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
+ rxq->sw_ring[j] = rxq->rx_stage[i];
+
+ return 0;
+ }
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
+ rxq->port_id, rxq->queue_id,
+ rxq->rx_tail, nb_rx);
+
+ if (rxq->rx_nb_avail)
+ return avf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
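+/* Requests larger than AVF_RX_MAX_BURST are served in bursts; a short
+ * burst means the staged packets ran out, so the loop stops early.
+ */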
+static uint16_t
+avf_recv_pkts_bulk_alloc(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx = 0, n, count;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= AVF_RX_MAX_BURST))
+ return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ while (nb_pkts) {
+ n = RTE_MIN(nb_pkts, AVF_RX_MAX_BURST);
+ count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx = (uint16_t)(nb_rx + count);
+ nb_pkts = (uint16_t)(nb_pkts - count);
+ if (count < n)
+ break;
+ }
+
+ return nb_rx;
+}
+
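+/* Reclaim rs_thresh descriptors at a time; e.g. with nb_tx_desc = 512,
+ * last_desc_cleaned = 480 and rs_thresh = 32, desc_to_clean_to wraps from
+ * 512 to 0 and nb_tx_to_clean = (512 - 480) + 0 = 32 (ignoring the
+ * last_id adjustment made for multi-segment packets).
+ */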
+static inline int
+avf_xmit_cleanup(struct avf_tx_queue *txq)
+{
+ struct avf_tx_entry *sw_ring = txq->sw_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+
+ volatile struct avf_tx_desc *txd = txq->tx_ring;
+
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(AVF_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE)) {
+ PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
+ "(port=%d queue=%d)", desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ return -1;
+ }
+
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
+
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
+
+ return 0;
+}
+
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+avf_calc_context_desc(uint64_t flags)
+{
+ static const uint64_t mask = PKT_TX_TCP_SEG;
+
+ return (flags & mask) ? 1 : 0;
+}
+
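+/* The data descriptor encodes MACLEN in 2-byte words and IPLEN/L4LEN in
+ * 4-byte words, hence the >> 1 and >> 2 shifts below; e.g. a 14-byte
+ * Ethernet header gives MACLEN 7 and a 20-byte IPv4 header gives IPLEN 5.
+ */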
+static inline void
+avf_txd_enable_checksum(uint64_t ol_flags,
+ uint32_t *td_cmd,
+ uint32_t *td_offset,
+ union avf_tx_offload tx_offload)
+{
+ /* Set MACLEN */
+ *td_offset |= (tx_offload.l2_len >> 1) <<
+ AVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L3 checksum offloads */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ *td_offset |= (tx_offload.l3_len >> 2) <<
+ AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV4) {
+ *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV4;
+ *td_offset |= (tx_offload.l3_len >> 2) <<
+ AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (ol_flags & PKT_TX_IPV6) {
+ *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV6;
+ *td_offset |= (tx_offload.l3_len >> 2) <<
+ AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (tx_offload.l4_len >> 2) <<
+ AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ return;
+ }
+
+ /* Enable L4 checksum offloads */
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+ AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
+ AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case PKT_TX_UDP_CKSUM:
+ *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+ AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
+
+/* set TSO context descriptor
+ * support IP -> L4 and IP -> IP -> L4
+ */
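+/* Example: for a TSO send with l2_len = 14, l3_len = 20 and l4_len = 20,
+ * hdr_len is 54, so cd_tso_len = pkt_len - 54 is the payload length
+ * programmed into the context descriptor, with tso_segsz as the MSS.
+ */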
+static inline uint64_t
+avf_set_tso_ctx(struct rte_mbuf *mbuf, union avf_tx_offload tx_offload)
+{
+ uint64_t ctx_desc = 0;
+ uint32_t cd_cmd, hdr_len, cd_tso_len;
+
+ if (!tx_offload.l4_len) {
+ PMD_TX_LOG(DEBUG, "L4 length set to 0");
+ return ctx_desc;
+ }
+
+ /* in case of non tunneling packet, the outer_l2_len and
+ * outer_l3_len must be 0.
+ */
+ hdr_len = tx_offload.l2_len +
+ tx_offload.l3_len +
+ tx_offload.l4_len;
+
+ cd_cmd = AVF_TX_CTX_DESC_TSO;
+ cd_tso_len = mbuf->pkt_len - hdr_len;
+ ctx_desc |= ((uint64_t)cd_cmd << AVF_TXD_CTX_QW1_CMD_SHIFT) |
+ ((uint64_t)cd_tso_len << AVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((uint64_t)mbuf->tso_segsz << AVF_TXD_CTX_QW1_MSS_SHIFT);
+
+ return ctx_desc;
+}
+
+/* Construct the tx flags */
+static inline uint64_t
+avf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
+ uint32_t td_tag)
+{
+ return rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DATA |
+ ((uint64_t)td_cmd << AVF_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)td_offset <<
+ AVF_TXD_QW1_OFFSET_SHIFT) |
+ ((uint64_t)size <<
+ AVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((uint64_t)td_tag <<
+ AVF_TXD_QW1_L2TAG1_SHIFT));
+}
+
+/* TX function */
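+/* Data descriptors accumulate until rs_thresh of them have been used; the
+ * RS bit is then set on the last descriptor so the hardware reports
+ * completion for the whole batch, which avf_xmit_cleanup() reclaims later.
+ */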
+uint16_t
+avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ volatile struct avf_tx_desc *txd;
+ volatile struct avf_tx_desc *txr;
+ struct avf_tx_queue *txq;
+ struct avf_tx_entry *sw_ring;
+ struct avf_tx_entry *txe, *txn;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint16_t tx_id;
+ uint16_t nb_tx;
+ uint32_t td_cmd;
+ uint32_t td_offset;
+ uint32_t td_tag;
+ uint64_t ol_flags;
+ uint16_t nb_used;
+ uint16_t nb_ctx;
+ uint16_t tx_last;
+ uint16_t slen;
+ uint64_t buf_dma_addr;
+ union avf_tx_offload tx_offload = {0};
+
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Check if the descriptor ring needs to be cleaned. */
+ if (txq->nb_free < txq->free_thresh)
+ avf_xmit_cleanup(txq);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ td_cmd = 0;
+ td_tag = 0;
+ td_offset = 0;
+
+ tx_pkt = *tx_pkts++;
+ RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+ ol_flags = tx_pkt->ol_flags;
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+
+ /* Calculate the number of context descriptors needed. */
+ nb_ctx = avf_calc_context_desc(ol_flags);
+
+ /* The number of descriptors that must be allocated for
+ * a packet equals to the number of the segments of that
+ * packet plus 1 context descriptor if needed.
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+ tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
+ " tx_first=%u tx_last=%u",
+ txq->port_id, txq->queue_id, tx_id, tx_last);
+
+ if (nb_used > txq->nb_free) {
+ if (avf_xmit_cleanup(txq)) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ if (unlikely(nb_used > txq->rs_thresh)) {
+ while (nb_used > txq->nb_free) {
+ if (avf_xmit_cleanup(txq)) {
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /* Descriptor based VLAN insertion */
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ td_cmd |= AVF_TX_DESC_CMD_IL2TAG1;
+ td_tag = tx_pkt->vlan_tci;
+ }
+
+ /* According to the datasheet, bit 2 is reserved and must be
+ * set to 1.
+ */
+ td_cmd |= 0x04;
+
+ /* Enable checksum offloading */
+ if (ol_flags & AVF_TX_CKSUM_OFFLOAD_MASK)
+ avf_txd_enable_checksum(ol_flags, &td_cmd,
+ &td_offset, tx_offload);
+
+ if (nb_ctx) {
+ /* Setup TX context descriptor if required */
+ volatile struct avf_tx_context_desc *ctx_txd =
+ (volatile struct avf_tx_context_desc *)
+ &txr[tx_id];
+ uint16_t cd_l2tag2 = 0;
+ uint64_t cd_type_cmd_tso_mss =
+ AVF_TX_DESC_DTYPE_CONTEXT;
+
+ txn = &sw_ring[txe->next_id];
+ RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+ if (txe->mbuf) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ /* TSO enabled */
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cd_type_cmd_tso_mss |=
+ avf_set_tso_ctx(tx_pkt, tx_offload);
+
+ AVF_DUMP_TX_DESC(txq, ctx_txd, tx_id);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+
+ if (txe->mbuf)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /* Setup TX Descriptor */
+ slen = m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+ txd->cmd_type_offset_bsz = avf_build_ctob(td_cmd,
+ td_offset,
+ slen,
+ td_tag);
+
+ AVF_DUMP_TX_DESC(txq, txd, tx_id);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg);
+
+ /* The last packet data descriptor needs End Of Packet (EOP) */
+ td_cmd |= AVF_TX_DESC_CMD_EOP;
+ txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
+ txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+
+ if (txq->nb_used >= txq->rs_thresh) {
+ PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
+ "%4u (port=%d queue=%d)",
+ tx_last, txq->port_id, txq->queue_id);
+
+ td_cmd |= AVF_TX_DESC_CMD_RS;
+
+ /* Update txq RS bit counters */
+ txq->nb_used = 0;
+ }
+
+ txd->cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)td_cmd) <<
+ AVF_TXD_QW1_CMD_SHIFT);
+ AVF_DUMP_TX_DESC(txq, txd, tx_id);
+ }
+
+end_of_tx:
+ rte_wmb();
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ txq->port_id, txq->queue_id, tx_id, nb_tx);
+
+ AVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
+
+static uint16_t
+avf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct avf_tx_queue *txq = (struct avf_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+ ret = avf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+/* TX prep functions */
+uint16_t
+avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* Check condition for nb_segs > AVF_TX_MAX_MTU_SEG. */
+ if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (m->nb_segs > AVF_TX_MAX_MTU_SEG) {
+ rte_errno = EINVAL;
+ return i;
+ }
+ } else if ((m->tso_segsz < AVF_MIN_TSO_MSS) ||
+ (m->tso_segsz > AVF_MAX_TSO_MSS)) {
+ /* An MSS outside the supported range is considered malicious */
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & AVF_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
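+
+/* Usage sketch (illustrative, not part of this driver): an application
+ * would typically validate offloads before sending, e.g.
+ *	nb = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
+ *	nb = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
+ * rte_eth_tx_prepare() ends up calling avf_prep_pkts() when it is
+ * installed as dev->tx_pkt_prepare (see avf_set_tx_function() below).
+ */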
+
+/* Choose Rx function */
+void
+avf_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_rx_queue *rxq;
+ int i;
+
+ if (adapter->rx_vec_allowed) {
+ if (dev->data->scattered_rx) {
+ PMD_DRV_LOG(DEBUG, "Using Vector Scattered Rx callback"
+ " (port=%d).", dev->data->port_id);
+ dev->rx_pkt_burst = avf_recv_scattered_pkts_vec;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using Vector Rx callback"
+ " (port=%d).", dev->data->port_id);
+ dev->rx_pkt_burst = avf_recv_pkts_vec;
+ }
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq)
+ continue;
+ avf_rxq_vec_setup(rxq);
+ }
+ } else if (dev->data->scattered_rx) {
+ PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = avf_recv_scattered_pkts;
+ } else if (adapter->rx_bulk_alloc_allowed) {
+ PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = avf_recv_pkts_bulk_alloc;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = avf_recv_pkts;
+ }
+}
+
+/* Choose Tx function */
+void
+avf_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_tx_queue *txq;
+ int i;
+
+ if (adapter->tx_vec_allowed) {
+ PMD_DRV_LOG(DEBUG, "Using Vector Tx callback (port=%d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst = avf_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+ avf_txq_vec_setup(txq);
+ }
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst = avf_xmit_pkts;
+ dev->tx_pkt_prepare = avf_prep_pkts;
+ }
+}
+
+void
+avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct avf_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mp;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = TRUE;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct avf_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_free_thresh = txq->free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->rs_thresh;
+ qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
+/* Get the number of used descriptors of an Rx queue */
+uint32_t
+avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+#define AVF_RXQ_SCAN_INTERVAL 4
+ volatile union avf_rx_desc *rxdp;
+ struct avf_rx_queue *rxq;
+ uint16_t desc = 0;
+
+ rxq = dev->data->rx_queues[queue_id];
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ while ((desc < rxq->nb_rx_desc) &&
+ ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
+ AVF_RXD_QW1_STATUS_MASK) >> AVF_RXD_QW1_STATUS_SHIFT) &
+ (1 << AVF_RX_DESC_STATUS_DD_SHIFT)) {
+ /* Check the DD bit of every fourth Rx descriptor to avoid
+ * checking too frequently and degrading performance too much.
+ */
+ desc += AVF_RXQ_SCAN_INTERVAL;
+ rxdp += AVF_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
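+
+/* Example of the scan granularity (hypothetical counts): with 16
+ * completed descriptors the loop reports 16, while 17 completed
+ * descriptors report 20; the 4-descriptor step trades a little
+ * accuracy for fewer descriptor reads.
+ */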
+
+int
+avf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
+{
+ struct avf_rx_queue *rxq = rx_queue;
+ volatile uint64_t *status;
+ uint64_t mask;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+ mask = rte_le_to_cpu_64((1ULL << AVF_RX_DESC_STATUS_DD_SHIFT)
+ << AVF_RXD_QW1_STATUS_SHIFT);
+ if (*status & mask)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+avf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
+{
+ struct avf_tx_queue *txq = tx_queue;
+ volatile uint64_t *status;
+ uint64_t mask, expect;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
+ txq->rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ mask = rte_le_to_cpu_64(AVF_TXD_QW1_DTYPE_MASK);
+ expect = rte_cpu_to_le_64(
+ AVF_TX_DESC_DTYPE_DESC_DONE << AVF_TXD_QW1_DTYPE_SHIFT);
+ if ((*status & mask) == expect)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
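+
+/* Example (hypothetical values): with rs_thresh = 32 and tx_tail = 10,
+ * an offset of 5 maps to descriptor 15, which is rounded up to 32, the
+ * next descriptor that carries an RS bit and thus a DD completion
+ * status worth checking.
+ */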
+
+uint16_t __attribute__((weak))
+avf_recv_pkts_vec(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+avf_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+avf_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+avf_rxq_vec_setup(__rte_unused struct avf_rx_queue *rxq)
+{
+ return -1;
+}
+
+int __attribute__((weak))
+avf_txq_vec_setup(__rte_unused struct avf_tx_queue *txq)
+{
+ return -1;
+}
diff --git a/drivers/net/avf/avf_rxtx.h b/drivers/net/avf/avf_rxtx.h
new file mode 100644
index 00000000..d1701cd6
--- /dev/null
+++ b/drivers/net/avf/avf_rxtx.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_RXTX_H_
+#define _AVF_RXTX_H_
+
+/* The ring length (QLEN) must be a whole multiple of 32 descriptors. */
+#define AVF_ALIGN_RING_DESC 32
+#define AVF_MIN_RING_DESC 64
+#define AVF_MAX_RING_DESC 4096
+#define AVF_DMA_MEM_ALIGN 4096
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define AVF_RING_BASE_ALIGN 128
+
+/* used for Rx Bulk Allocate */
+#define AVF_RX_MAX_BURST 32
+
+/* used for Vector PMD */
+#define AVF_VPMD_RX_MAX_BURST 32
+#define AVF_VPMD_TX_MAX_BURST 32
+#define AVF_VPMD_DESCS_PER_LOOP 4
+#define AVF_VPMD_TX_MAX_FREE_BUF 64
+
+#define AVF_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
+ ETH_TXQ_FLAGS_NOOFFLOADS)
+
+#define DEFAULT_TX_RS_THRESH 32
+#define DEFAULT_TX_FREE_THRESH 32
+
+#define AVF_MIN_TSO_MSS 256
+#define AVF_MAX_TSO_MSS 9668
+#define AVF_TSO_MAX_SEG UINT8_MAX
+#define AVF_TX_MAX_MTU_SEG 8
+
+#define AVF_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define AVF_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define AVF_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ AVF_TX_OFFLOAD_MASK)
+
+/* HW desc structure, both 16-byte and 32-byte types are supported */
+#ifdef RTE_LIBRTE_AVF_16BYTE_RX_DESC
+#define avf_rx_desc avf_16byte_rx_desc
+#else
+#define avf_rx_desc avf_32byte_rx_desc
+#endif
+
+struct avf_rxq_ops {
+ void (*release_mbufs)(struct avf_rx_queue *rxq);
+};
+
+struct avf_txq_ops {
+ void (*release_mbufs)(struct avf_tx_queue *txq);
+};
+
+/* Structure associated with each Rx queue. */
+struct avf_rx_queue {
+ struct rte_mempool *mp; /* mbuf pool to populate Rx ring */
+ const struct rte_memzone *mz; /* memzone for Rx ring */
+ volatile union avf_rx_desc *rx_ring; /* Rx ring virtual address */
+ uint64_t rx_ring_phys_addr; /* Rx ring DMA address */
+ struct rte_mbuf **sw_ring; /* address of SW ring */
+ uint16_t nb_rx_desc; /* ring length */
+ uint16_t rx_tail; /* current value of tail */
+ volatile uint8_t *qrx_tail; /* register address of tail */
+ uint16_t rx_free_thresh; /* max free RX desc to hold */
+ uint16_t nb_rx_hold; /* number of held free RX desc */
+ struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+ struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
+ struct rte_mbuf fake_mbuf; /* dummy mbuf */
+
+ /* used for VPMD */
+ uint16_t rxrearm_nb; /* number of descs remaining to re-arm */
+ uint16_t rxrearm_start; /* the idx we start the re-arming from */
+ uint64_t mbuf_initializer; /* value to init mbufs */
+
+ /* for rx bulk */
+ uint16_t rx_nb_avail; /* number of staged packets ready */
+ uint16_t rx_next_avail; /* index of the next staged packet */
+ uint16_t rx_free_trigger; /* triggers rx buffer allocation */
+ struct rte_mbuf *rx_stage[AVF_RX_MAX_BURST * 2]; /* store mbuf */
+
+ uint16_t port_id; /* device port ID */
+ uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+ uint16_t queue_id; /* Rx queue index */
+ uint16_t rx_buf_len; /* The packet buffer size */
+ uint16_t rx_hdr_len; /* The header buffer size */
+ uint16_t max_pkt_len; /* Maximum packet length */
+
+ bool q_set; /* if rx queue has been configured */
+ bool rx_deferred_start; /* don't start this queue in dev start */
+ const struct avf_rxq_ops *ops;
+};
+
+struct avf_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint16_t next_id;
+ uint16_t last_id;
+};
+
+/* Structure associated with each TX queue. */
+struct avf_tx_queue {
+ const struct rte_memzone *mz; /* memzone for Tx ring */
+ volatile struct avf_tx_desc *tx_ring; /* Tx ring virtual address */
+ uint64_t tx_ring_phys_addr; /* Tx ring DMA address */
+ struct avf_tx_entry *sw_ring; /* address array of SW ring */
+ uint16_t nb_tx_desc; /* ring length */
+ uint16_t tx_tail; /* current value of tail */
+ volatile uint8_t *qtx_tail; /* register address of tail */
+ /* number of used desc since RS bit set */
+ uint16_t nb_used;
+ uint16_t nb_free;
+ uint16_t last_desc_cleaned; /* last desc that has been cleaned */
+ uint16_t free_thresh;
+ uint16_t rs_thresh;
+
+ uint16_t port_id;
+ uint16_t queue_id;
+ uint32_t txq_flags;
+ uint16_t next_dd; /* next desc to check the DD bit, for VPMD */
+ uint16_t next_rs; /* next desc to set the RS bit, for VPMD */
+
+ bool q_set; /* if tx queue has been configured */
+ bool tx_deferred_start; /* don't start this queue in dev start */
+ const struct avf_txq_ops *ops;
+};
+
+/* Offload features */
+union avf_tx_offload {
+ uint64_t data;
+ struct {
+ uint64_t l2_len:7; /* L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /* L3 (IP) Header Length. */
+ uint64_t l4_len:8; /* L4 Header Length. */
+ uint64_t tso_segsz:16; /* TCP TSO segment size */
+ /* uint64_t unused : 24; */
+ };
+};
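+
+/* Example (hypothetical values): l2_len = 14, l3_len = 20, l4_len = 20
+ * and tso_segsz = 1460 all fit in the single 64-bit 'data' word, so
+ * the whole offload state can be copied or compared in one operation.
+ */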
+
+int avf_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+
+int avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void avf_dev_rx_queue_release(void *rxq);
+
+int avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+int avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void avf_dev_tx_queue_release(void *txq);
+void avf_stop_queues(struct rte_eth_dev *dev);
+uint16_t avf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t avf_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t avf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+void avf_set_rx_function(struct rte_eth_dev *dev);
+void avf_set_tx_function(struct rte_eth_dev *dev);
+void avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+void avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+uint32_t avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
+int avf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
+int avf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
+
+uint16_t avf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t avf_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t avf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int avf_rxq_vec_setup(struct avf_rx_queue *rxq);
+int avf_txq_vec_setup(struct avf_tx_queue *txq);
+
+static inline
+void avf_dump_rx_descriptor(struct avf_rx_queue *rxq,
+ const void *desc,
+ uint16_t rx_id)
+{
+#ifdef RTE_LIBRTE_AVF_16BYTE_RX_DESC
+ const union avf_16byte_rx_desc *rx_desc = desc;
+
+ printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
+ rxq->queue_id, rx_id, rx_desc->read.pkt_addr,
+ rx_desc->read.hdr_addr);
+#else
+ const union avf_32byte_rx_desc *rx_desc = desc;
+
+ printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
+ " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
+ rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
+ rx_desc->read.rsvd1, rx_desc->read.rsvd2);
+#endif
+}
+
+/* All Tx descriptor types are 16 bytes, so just use one of them
+ * to print the quadwords.
+ */
+static inline
+void avf_dump_tx_descriptor(const struct avf_tx_queue *txq,
+ const void *desc, uint16_t tx_id)
+{
+ char *name;
+ const struct avf_tx_desc *tx_desc = desc;
+ enum avf_tx_desc_dtype_value type;
+
+ type = (enum avf_tx_desc_dtype_value)rte_le_to_cpu_64(
+ tx_desc->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(AVF_TXD_QW1_DTYPE_MASK));
+ switch (type) {
+ case AVF_TX_DESC_DTYPE_DATA:
+ name = "Tx_data_desc";
+ break;
+ case AVF_TX_DESC_DTYPE_CONTEXT:
+ name = "Tx_context_desc";
+ break;
+ default:
+ name = "unknown_desc";
+ break;
+ }
+
+ printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
+ txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+ tx_desc->cmd_type_offset_bsz);
+}
+
+#ifdef DEBUG_DUMP_DESC
+#define AVF_DUMP_RX_DESC(rxq, desc, rx_id) \
+ avf_dump_rx_descriptor(rxq, desc, rx_id)
+#define AVF_DUMP_TX_DESC(txq, desc, tx_id) \
+ avf_dump_tx_descriptor(txq, desc, tx_id)
+#else
+#define AVF_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0)
+#define AVF_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0)
+#endif
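+
+/* Build-time switch: compiling with -DDEBUG_DUMP_DESC (e.g. through
+ * EXTRA_CFLAGS) turns the AVF_DUMP_*_DESC macros into real descriptor
+ * dumps; otherwise they expand to nothing.
+ */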
+
+#endif /* _AVF_RXTX_H_ */
diff --git a/drivers/net/avf/avf_rxtx_vec_common.h b/drivers/net/avf/avf_rxtx_vec_common.h
new file mode 100644
index 00000000..8057b968
--- /dev/null
+++ b/drivers/net/avf/avf_rxtx_vec_common.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_RXTX_VEC_COMMON_H_
+#define _AVF_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "avf.h"
+#include "avf_rxtx.h"
+
+static inline uint16_t
+reassemble_packets(struct avf_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[AVF_VPMD_RX_MAX_BURST];
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned int pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len) {
+ end->data_len -= rxq->crc_len;
+ } else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ start->nb_segs--;
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ }
+ pkts[pkt_idx++] = start;
+ start = NULL;
+ end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
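+
+/* Illustrative split_flags input (hypothetical burst of 4): with flags
+ * {1, 1, 0, 0} the first three mbufs are chained into a single packet
+ * (a zero flag marks the final segment) and the fourth passes through
+ * untouched, so the function returns 2 packets.
+ */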
+
+static __rte_always_inline int
+avf_tx_free_bufs(struct avf_tx_queue *txq)
+{
+ struct avf_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[AVF_VPMD_TX_MAX_FREE_BUF];
+
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(AVF_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->rs_thresh;
+
+ /* first buffer to free from the S/W ring is at index
+ * next_dd - (rs_thresh - 1)
+ */
+ txep = &txq->sw_ring[txq->next_dd - (n - 1)];
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
+ txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
+ if (txq->next_dd >= txq->nb_tx_desc)
+ txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+
+ return txq->rs_thresh;
+}
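+
+/* Design note: mbufs from the same mempool are returned in one
+ * rte_mempool_put_bulk() call; when a different pool is encountered
+ * the pending batch is flushed first, so the common single-pool case
+ * stays on the fast path.
+ */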
+
+static __rte_always_inline void
+tx_backlog_entry(struct avf_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+static inline void
+_avf_rx_queue_release_mbufs_vec(struct avf_rx_queue *rxq)
+{
+ const unsigned int mask = rxq->nb_rx_desc - 1;
+ unsigned int i;
+
+ if (!rxq->sw_ring || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i])
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
+ } else {
+ for (i = rxq->rx_tail;
+ i != rxq->rxrearm_start;
+ i = (i + 1) & mask) {
+ if (rxq->sw_ring[i])
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
+ }
+
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline void
+_avf_tx_queue_release_mbufs_vec(struct avf_tx_queue *txq)
+{
+ unsigned int i;
+ const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
+
+ if (!txq->sw_ring || txq->nb_free == max_desc)
+ return;
+
+ i = txq->next_dd - txq->rs_thresh + 1;
+ if (txq->tx_tail < i) {
+ for (; i < txq->nb_tx_desc; i++) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ i = 0;
+ }
+}
+
+static inline int
+avf_rxq_vec_setup_default(struct avf_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
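+
+/* The saved mbuf_initializer snapshots the 64-bit rearm_data region
+ * (data_off, refcnt, nb_segs, port), letting the vector Rx path reset
+ * each mbuf header with one store instead of four field writes.
+ */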
+#endif
diff --git a/drivers/net/avf/avf_rxtx_vec_sse.c b/drivers/net/avf/avf_rxtx_vec_sse.c
new file mode 100644
index 00000000..8275100f
--- /dev/null
+++ b/drivers/net/avf/avf_rxtx_vec_sse.c
@@ -0,0 +1,656 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "base/avf_prototype.h"
+#include "base/avf_type.h"
+#include "avf.h"
+#include "avf_rxtx.h"
+#include "avf_rxtx_vec_common.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+avf_rxq_rearm(struct avf_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+
+ volatile union avf_rx_desc *rxdp;
+ struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ __m128i dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp, (void *)rxp,
+ rxq->rx_free_thresh) < 0) {
+ if (rxq->rxrearm_nb + rxq->rx_free_thresh >= rxq->nb_rx_desc) {
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < AVF_VPMD_DESCS_PER_LOOP; i++) {
+ rxp[i] = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < rxq->rx_free_thresh; i += 2, rxp += 2) {
+ __m128i vaddr0, vaddr1;
+
+ mb0 = rxp[0];
+ mb1 = rxp[1];
+
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += rxq->rx_free_thresh;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= rxq->rx_free_thresh;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "rearm_start=%u rearm_nb=%u",
+ rxq->port_id, rxq->queue_id,
+ rx_id, rxq->rxrearm_start, rxq->rxrearm_nb);
+
+ /* Update the tail pointer on the NIC */
+ AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
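+
+/* Flow sketch: each loop iteration above loads two mbufs' buf_addr/
+ * buf_iova pairs, adds the headroom, and stores the resulting DMA
+ * addresses straight into two Rx descriptors; a rx_free_thresh of 32
+ * (a typical value) therefore rearms 32 descriptors in 16 iterations.
+ */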
+
+static inline void
+desc_to_olflags_v(struct avf_rx_queue *rxq, __m128i descs[4],
+ struct rte_mbuf **rx_pkts)
+{
+ const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
+ __m128i vlan0, vlan1, rss, l3_l4e;
+
+ /* mask everything except RSS, flow director and VLAN flags:
+ * bit 2 is for the VLAN tag, bit 11 for flow director indication,
+ * bits 13:12 for RSS indication.
+ */
+ const __m128i rss_vlan_msk = _mm_set_epi32(
+ 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
+
+ const __m128i cksum_mask = _mm_set_epi32(
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD);
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ 0, 0, 0, 0);
+
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
+ 0, 0, PKT_RX_FDIR, 0);
+
+ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ /* shift right 1 bit to make sure it does not exceed 255 */
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+
+ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
+ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
+ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);
+
+ vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
+ vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
+
+ rss = _mm_srli_epi32(vlan1, 11);
+ rss = _mm_shuffle_epi8(rss_flags, rss);
+
+ l3_l4e = _mm_srli_epi32(vlan1, 22);
+ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+ /* then we shift left 1 bit */
+ l3_l4e = _mm_slli_epi32(l3_l4e, 1);
+ /* we need to mask out the redundant bits */
+ l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
+
+ vlan0 = _mm_or_si128(vlan0, rss);
+ vlan0 = _mm_or_si128(vlan0, l3_l4e);
+
+ /* At this point, we have the 4 sets of flags in the low 16-bits
+ * of each 32-bit value in vlan0.
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
+
+ /* write the rearm data and the olflags in one write */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
+
+#define PKTLEN_SHIFT 10
+
+static inline void
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
+ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
+ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ /* [2] - [21] reserved */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ /* All others reserved */
+ };
+
+ ptype0 = _mm_srli_epi64(ptype0, 30);
+ ptype1 = _mm_srli_epi64(ptype1, 30);
+
+ rx_pkts[0]->packet_type = type_table[_mm_extract_epi8(ptype0, 0)];
+ rx_pkts[1]->packet_type = type_table[_mm_extract_epi8(ptype0, 8)];
+ rx_pkts[2]->packet_type = type_table[_mm_extract_epi8(ptype1, 0)];
+ rx_pkts[3]->packet_type = type_table[_mm_extract_epi8(ptype1, 8)];
+}
+
+/* Notice:
+ * - nb_pkts < AVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > AVF_VPMD_RX_MAX_BURST, only scan AVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct avf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union avf_rx_desc *rxdp;
+ struct rte_mbuf **sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ __m128i shuf_msk;
+
+ __m128i crc_adjust = _mm_set_epi16(
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+ /* compile-time check the above crc_adjust layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi16
+ * call above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ __m128i dd_check, eop_check;
+
+ /* nb_pkts has to be less than or equal to AVF_VPMD_RX_MAX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, AVF_VPMD_RX_MAX_BURST);
+
+ /* nb_pkts has to be floor-aligned to AVF_VPMD_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, AVF_VPMD_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > rxq->rx_free_thresh)
+ avf_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, 0xFF, 0xFF /* pkt_type set as unknown */
+ );
+ /* Compile-time verify the shuffle mask
+ * NOTE: some field positions already verified above, but duplicated
+ * here for completeness in case of future modifications.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packets' descriptors in one loop
+ * [A*. mask out 4 unused dirty fields in desc]
+ * B. copy 4 mbuf pointers from sw_ring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += AVF_VPMD_DESCS_PER_LOOP,
+ rxdp += AVF_VPMD_DESCS_PER_LOOP) {
+ __m128i descs[AVF_VPMD_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
+
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+ rte_compiler_barrier();
+
+ /* B.2 copy 2 64-bit or 4 32-bit mbuf pointers into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64-bit mbuf pointers */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
+#endif
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ rte_compiler_barrier();
+ /* B.1 load 2 mbuf pointers */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ rte_compiler_barrier();
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+#if defined(RTE_ARCH_X86_64)
+ /* B.2 copy 2 mbuf pointers into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
+#endif
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned */
+ const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
+ const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
+ descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* C.1 4=>2 status err info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned */
+ const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
+ const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
+ descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.2 get 4 pkts status err value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128(
+ (void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128(
+ (void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 remove crc */
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order, which the count of
+ * DD bits doesn't care about. However, for end-of-packet
+ * tracking we do care, so shuffle. This also compresses
+ * the 32-bit values to 8-bit.
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += AVF_VPMD_DESCS_PER_LOOP;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128(
+ (void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ desc_to_ptype_v(descs, &rx_pkts[pos]);
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != AVF_VPMD_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+/* Notice:
+ * - nb_pkts < AVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > AVF_VPMD_RX_MAX_BURST, only scan AVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+avf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < AVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > AVF_VPMD_RX_MAX_BURST, only scan AVF_VPMD_RX_MAX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+avf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avf_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[AVF_VPMD_RX_MAX_BURST] = {0};
+ unsigned int i = 0;
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (!rxq->pkt_first_seg &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly */
+ if (!rxq->pkt_first_seg) {
+ /* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct avf_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw =
+ (AVF_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << AVF_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len <<
+ AVF_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_iova + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
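+
+/* Example (hypothetical values): a single-segment 60-byte mbuf with
+ * flags = EOP plus the mandatory bit 2 produces one 16-byte descriptor
+ * whose low quadword is the buffer DMA address and whose high quadword
+ * packs DTYPE_DATA, the command flags and data_len = 60 as buffer size.
+ */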
+
+static inline void
+avf_vtx(volatile struct avf_tx_desc *txdp, struct rte_mbuf **pkt,
+ uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+avf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avf_tx_queue *txq = (struct avf_tx_queue *)tx_queue;
+ volatile struct avf_tx_desc *txdp;
+ struct avf_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = AVF_TX_DESC_CMD_EOP | 0x04; /* bit 2 must be set */
+ uint64_t rs = AVF_TX_DESC_CMD_RS | flags;
+ int i;
+
+ /* crossing the rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
+
+ if (txq->nb_free < txq->free_thresh)
+ avf_tx_free_bufs(txq);
+
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+ nb_commit = nb_pkts;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ avf_vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->next_rs) {
+ txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)AVF_TX_DESC_CMD_RS) <<
+ AVF_TXD_QW1_CMD_SHIFT);
+ txq->next_rs =
+ (uint16_t)(txq->next_rs + txq->rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_pkts=%u",
+ txq->port_id, txq->queue_id, tx_id, nb_pkts);
+
+ AVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+avf_rx_queue_release_mbufs_sse(struct avf_rx_queue *rxq)
+{
+ _avf_rx_queue_release_mbufs_vec(rxq);
+}
+
+static void __attribute__((cold))
+avf_tx_queue_release_mbufs_sse(struct avf_tx_queue *txq)
+{
+ _avf_tx_queue_release_mbufs_vec(txq);
+}
+
+static const struct avf_rxq_ops sse_vec_rxq_ops = {
+ .release_mbufs = avf_rx_queue_release_mbufs_sse,
+};
+
+static const struct avf_txq_ops sse_vec_txq_ops = {
+ .release_mbufs = avf_tx_queue_release_mbufs_sse,
+};
+
+int __attribute__((cold))
+avf_txq_vec_setup(struct avf_tx_queue *txq)
+{
+ txq->ops = &sse_vec_txq_ops;
+ return 0;
+}
+
+int __attribute__((cold))
+avf_rxq_vec_setup(struct avf_rx_queue *rxq)
+{
+ rxq->ops = &sse_vec_rxq_ops;
+ return avf_rxq_vec_setup_default(rxq);
+}
diff --git a/drivers/net/avf/avf_vchnl.c b/drivers/net/avf/avf_vchnl.c
new file mode 100644
index 00000000..fa71014e
--- /dev/null
+++ b/drivers/net/avf/avf_vchnl.c
@@ -0,0 +1,812 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_dev.h>
+
+#include "avf_log.h"
+#include "base/avf_prototype.h"
+#include "base/avf_adminq_cmd.h"
+#include "base/avf_type.h"
+
+#include "avf.h"
+#include "avf_rxtx.h"
+
+#define MAX_TRY_TIMES 200
+#define ASQ_DELAY_MS 10
+
+/* Read data from the admin queue to get a msg from the PF driver */
+static enum avf_status_code
+avf_read_msg_from_pf(struct avf_adapter *adapter, uint16_t buf_len,
+ uint8_t *buf)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_arq_event_info event;
+ enum virtchnl_ops opcode;
+ int ret;
+
+ event.buf_len = buf_len;
+ event.msg_buf = buf;
+ ret = avf_clean_arq_element(hw, &event, NULL);
+ /* Can't read any msg from adminQ */
+ if (ret) {
+ PMD_DRV_LOG(DEBUG, "Can't read msg from AQ");
+ return ret;
+ }
+
+ opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
+ vf->cmd_retval = (enum virtchnl_status_code)rte_le_to_cpu_32(
+ event.desc.cookie_low);
+
+ PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d",
+ opcode, vf->cmd_retval);
+
+ if (opcode != vf->pend_cmd)
+ PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
+ vf->pend_cmd, opcode);
+
+ return AVF_SUCCESS;
+}
+
+static int
+avf_execute_vf_cmd(struct avf_adapter *adapter, struct avf_cmd_info *args)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_arq_event_info event_info;
+ enum avf_status_code ret;
+ int err = 0;
+ int i = 0;
+
+ if (_atomic_set_cmd(vf, args->ops))
+ return -1;
+
+ ret = avf_aq_send_msg_to_pf(hw, args->ops, AVF_SUCCESS,
+ args->in_args, args->in_args_size, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
+ _clear_cmd(vf);
+ return err;
+ }
+
+ switch (args->ops) {
+ case VIRTCHNL_OP_RESET_VF:
+ /* no need to wait for a response */
+ _clear_cmd(vf);
+ break;
+ case VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ /* for the init virtchnl ops, we need to poll the response */
+ do {
+ ret = avf_read_msg_from_pf(adapter, args->out_size,
+ args->out_buffer);
+ if (ret == AVF_SUCCESS)
+ break;
+ rte_delay_ms(ASQ_DELAY_MS);
+ } while (i++ < MAX_TRY_TIMES);
+ if (i >= MAX_TRY_TIMES ||
+ vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+ err = -1;
+ PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+ " for cmd %d", vf->cmd_retval, args->ops);
+ }
+ _clear_cmd(vf);
+ break;
+
+ default:
+ /* For other virtchnl ops at runtime,
+ * wait for the cmd-done flag.
+ */
+ do {
+ if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
+ break;
+ rte_delay_ms(ASQ_DELAY_MS);
+ /* If no msg is read, or only a sys event is read, keep polling */
+ } while (i++ < MAX_TRY_TIMES);
+ /* If no response is received, clear the pending command */
+ if (i >= MAX_TRY_TIMES ||
+ vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+ err = -1;
+ PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+ " for cmd %d", vf->cmd_retval, args->ops);
+ _clear_cmd(vf);
+ }
+ break;
+ }
+
+ return err;
+}
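+
+/* Timing note: the polling loops above wait at most
+ * MAX_TRY_TIMES * ASQ_DELAY_MS = 200 * 10 ms = 2 s before a command
+ * is declared failed.
+ */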
+
+static void
+avf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
+ uint16_t msglen)
+{
+ struct virtchnl_pf_event *pf_msg =
+ (struct virtchnl_pf_event *)msg;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (msglen < sizeof(struct virtchnl_pf_event)) {
+ PMD_DRV_LOG(DEBUG, "Error event");
+ return;
+ }
+ switch (pf_msg->event) {
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ break;
+ case VIRTCHNL_EVENT_LINK_CHANGE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
+ vf->link_up = pf_msg->event_data.link_event.link_status;
+ vf->link_speed = pf_msg->event_data.link_event.link_speed;
+ avf_dev_link_update(dev, 0);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ break;
+ case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
+ break;
+ default:
+ PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
+ break;
+ }
+}
+
+void
+avf_handle_virtchnl_msg(struct rte_eth_dev *dev)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct avf_arq_event_info info;
+ uint16_t pending, aq_opc;
+ enum virtchnl_ops msg_opc;
+ enum avf_status_code msg_ret;
+ int ret;
+
+ info.buf_len = AVF_AQ_BUF_SZ;
+ if (!vf->aq_resp) {
+ PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
+ return;
+ }
+ info.msg_buf = vf->aq_resp;
+
+ pending = 1;
+ while (pending) {
+ ret = avf_clean_arq_element(hw, &info, &pending);
+
+ if (ret != AVF_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ,"
+ "ret: %d", ret);
+ break;
+ }
+ aq_opc = rte_le_to_cpu_16(info.desc.opcode);
+ /* For a message sent from PF to VF, the opcode is stored in
+ * cookie_high of struct avf_aq_desc, while the return error
+ * code is stored in cookie_low; both are filled in by the
+ * PF driver.
+ */
+ msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
+ info.desc.cookie_high);
+ msg_ret = (enum avf_status_code)rte_le_to_cpu_32(
+ info.desc.cookie_low);
+ switch (aq_opc) {
+ case avf_aqc_opc_send_msg_to_vf:
+ if (msg_opc == VIRTCHNL_OP_EVENT) {
+ avf_handle_pf_event_msg(dev, info.msg_buf,
+ info.msg_len);
+ } else {
+ /* the read message matches the pending command */
+ if (msg_opc == vf->pend_cmd) {
+ vf->cmd_retval = msg_ret;
+ /* prevent compiler reordering */
+ rte_compiler_barrier();
+ _clear_cmd(vf);
+ } else
+ PMD_DRV_LOG(ERR, "command mismatch,"
+ "expect %u, get %u",
+ vf->pend_cmd, msg_opc);
+ PMD_DRV_LOG(DEBUG,
+ "adminq response is received,"
+ " opcode = %d", msg_opc);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ aq_opc);
+ break;
+ }
+ }
+}
+
+int
+avf_enable_vlan_strip(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_cmd_info args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ ret = avf_execute_vf_cmd(adapter, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of"
+ " OP_ENABLE_VLAN_STRIPPING");
+
+ return ret;
+}
+
+int
+avf_disable_vlan_strip(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_cmd_info args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ ret = avf_execute_vf_cmd(adapter, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of"
+ " OP_DISABLE_VLAN_STRIPPING");
+
+ return ret;
+}
+
+#define VIRTCHNL_VERSION_MAJOR_START 1
+#define VIRTCHNL_VERSION_MINOR_START 1
+
+/* Check API version with sync wait until version read from admin queue */
+int
+avf_check_api_version(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_version_info version, *pver;
+ struct avf_cmd_info args;
+ int err;
+
+ version.major = VIRTCHNL_VERSION_MAJOR;
+ version.minor = VIRTCHNL_VERSION_MINOR;
+
+ args.ops = VIRTCHNL_OP_VERSION;
+ args.in_args = (uint8_t *)&version;
+ args.in_args_size = sizeof(version);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
+ return err;
+ }
+
+ pver = (struct virtchnl_version_info *)args.out_buffer;
+ vf->virtchnl_version = *pver;
+
+ if (vf->virtchnl_version.major < VIRTCHNL_VERSION_MAJOR_START ||
+ (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START &&
+ vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START)) {
+ PMD_INIT_LOG(ERR, "VIRTCHNL API version should not be lower"
+ " than (%u.%u) to support Adapative VF",
+ VIRTCHNL_VERSION_MAJOR_START,
+ VIRTCHNL_VERSION_MAJOR_START);
+ return -1;
+ } else if (vf->virtchnl_version.major > VIRTCHNL_VERSION_MAJOR ||
+ (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR &&
+ vf->virtchnl_version.minor > VIRTCHNL_VERSION_MINOR)) {
+ PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
+ vf->virtchnl_version.major,
+ vf->virtchnl_version.minor,
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
+ return -1;
+ }
+
+ PMD_DRV_LOG(DEBUG, "Peer is supported PF host");
+ return 0;
+}
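+
+/* Example outcomes (hypothetical): a PF reporting virtchnl 1.0 is
+ * rejected as below the 1.1 minimum required for an adaptive VF, while
+ * a PF reporting a version newer than the one this VF was built
+ * against is rejected as a mismatch.
+ */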
+
+int
+avf_get_vf_resource(struct avf_adapter *adapter)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_cmd_info args;
+ uint32_t caps, len;
+ int err, i;
+
+ args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ /* TODO: request only the basic offload capabilities for now;
+ * advanced/optional offload capabilities still need to be added.
+ */
+
+ caps = AVF_BASIC_OFFLOAD_CAPS;
+
+ args.in_args = (uint8_t *)&caps;
+ args.in_args_size = sizeof(caps);
+
+ err = avf_execute_vf_cmd(adapter, &args);
+
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_GET_VF_RESOURCE");
+ return -1;
+ }
+
+ len = sizeof(struct virtchnl_vf_resource) +
+ AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
+
+ rte_memcpy(vf->vf_res, args.out_buffer,
+ RTE_MIN(args.out_size, len));
+ /* parse the VF config message returned by the PF */
+ avf_parse_hw_config(hw, vf->vf_res);
+ for (i = 0; i < vf->vf_res->num_vsis; i++) {
+ if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
+ vf->vsi_res = &vf->vf_res->vsi_res[i];
+ }
+
+ if (!vf->vsi_res) {
+ PMD_INIT_LOG(ERR, "no LAN VSI found");
+ return -1;
+ }
+
+ vf->vsi.vsi_id = vf->vsi_res->vsi_id;
+ vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
+ vf->vsi.adapter = adapter;
+
+ return 0;
+}
+
+int
+avf_enable_queues(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select queue_select;
+ struct avf_cmd_info args;
+ int err;
+
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+
+ queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
+ queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+
+ args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_ENABLE_QUEUES");
+ return err;
+ }
+ return 0;
+}
+
+int
+avf_disable_queues(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select queue_select;
+ struct avf_cmd_info args;
+ int err;
+
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+
+ queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
+ queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+
+ args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_DISABLE_QUEUES");
+ return err;
+ }
+ return 0;
+}
+
+int
+avf_switch_queue(struct avf_adapter *adapter, uint16_t qid,
+ bool rx, bool on)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select queue_select;
+ struct avf_cmd_info args;
+ int err;
+
+ memset(&queue_select, 0, sizeof(queue_select));
+ queue_select.vsi_id = vf->vsi_res->vsi_id;
+ if (rx)
+ queue_select.rx_queues |= 1 << qid;
+ else
+ queue_select.tx_queues |= 1 << qid;
+
+ if (on)
+ args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
+ else
+ args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
+ args.in_args = (u8 *)&queue_select;
+ args.in_args_size = sizeof(queue_select);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of %s",
+ on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
+ return err;
+}
+
+int
+avf_configure_rss_lut(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_rss_lut *rss_lut;
+ struct avf_cmd_info args;
+ int len, err = 0;
+
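+	/* virtchnl_rss_lut ends in a one-element array used as a flexible
+	 * array member, hence the "- 1" when sizing the message;
+	 * avf_configure_rss_key below uses the same pattern for the key.
+	 */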
+ len = sizeof(*rss_lut) + vf->vf_res->rss_lut_size - 1;
+ rss_lut = rte_zmalloc("rss_lut", len, 0);
+ if (!rss_lut)
+ return -ENOMEM;
+
+ rss_lut->vsi_id = vf->vsi_res->vsi_id;
+ rss_lut->lut_entries = vf->vf_res->rss_lut_size;
+ rte_memcpy(rss_lut->lut, vf->rss_lut, vf->vf_res->rss_lut_size);
+
+ args.ops = VIRTCHNL_OP_CONFIG_RSS_LUT;
+ args.in_args = (u8 *)rss_lut;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_CONFIG_RSS_LUT");
+
+ rte_free(rss_lut);
+ return err;
+}
+
+int
+avf_configure_rss_key(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_rss_key *rss_key;
+ struct avf_cmd_info args;
+ int len, err = 0;
+
+ len = sizeof(*rss_key) + vf->vf_res->rss_key_size - 1;
+ rss_key = rte_zmalloc("rss_key", len, 0);
+ if (!rss_key)
+ return -ENOMEM;
+
+ rss_key->vsi_id = vf->vsi_res->vsi_id;
+ rss_key->key_len = vf->vf_res->rss_key_size;
+ rte_memcpy(rss_key->key, vf->rss_key, vf->vf_res->rss_key_size);
+
+ args.ops = VIRTCHNL_OP_CONFIG_RSS_KEY;
+ args.in_args = (u8 *)rss_key;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_CONFIG_RSS_KEY");
+
+ rte_free(rss_key);
+ return err;
+}
+
+int
+avf_configure_queues(struct avf_adapter *adapter)
+{
+ struct avf_rx_queue **rxq =
+ (struct avf_rx_queue **)adapter->eth_dev->data->rx_queues;
+ struct avf_tx_queue **txq =
+ (struct avf_tx_queue **)adapter->eth_dev->data->tx_queues;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_vsi_queue_config_info *vc_config;
+ struct virtchnl_queue_pair_info *vc_qp;
+ struct avf_cmd_info args;
+ uint16_t i, size;
+ int err;
+
+ size = sizeof(*vc_config) +
+ sizeof(vc_config->qpair[0]) * vf->num_queue_pairs;
+ vc_config = rte_zmalloc("cfg_queue", size, 0);
+ if (!vc_config)
+ return -ENOMEM;
+
+ vc_config->vsi_id = vf->vsi_res->vsi_id;
+ vc_config->num_queue_pairs = vf->num_queue_pairs;
+
+ for (i = 0, vc_qp = vc_config->qpair;
+ i < vf->num_queue_pairs;
+ i++, vc_qp++) {
+ vc_qp->txq.vsi_id = vf->vsi_res->vsi_id;
+ vc_qp->txq.queue_id = i;
+		/* Virtchnl configures queues in pairs */
+ if (i < adapter->eth_dev->data->nb_tx_queues) {
+ vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
+ vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
+ }
+ vc_qp->rxq.vsi_id = vf->vsi_res->vsi_id;
+ vc_qp->rxq.queue_id = i;
+ vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
+		/* Virtchnl configures queues in pairs */
+ if (i < adapter->eth_dev->data->nb_rx_queues) {
+ vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
+ vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
+ vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
+ }
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ args.in_args = (uint8_t *)vc_config;
+ args.in_args_size = size;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of"
+ " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+
+ rte_free(vc_config);
+ return err;
+}
+
+int
+avf_config_irq_map(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_irq_map_info *map_info;
+ struct virtchnl_vector_map *vecmap;
+ struct avf_cmd_info args;
+ uint32_t vector_id;
+ int len, i, err;
+
+ len = sizeof(struct virtchnl_irq_map_info) +
+ sizeof(struct virtchnl_vector_map) * vf->nb_msix;
+
+ map_info = rte_zmalloc("map_info", len, 0);
+ if (!map_info)
+ return -ENOMEM;
+
+ map_info->num_vectors = vf->nb_msix;
+ for (i = 0; i < vf->nb_msix; i++) {
+ vecmap = &map_info->vecmap[i];
+ vecmap->vsi_id = vf->vsi_res->vsi_id;
+ vecmap->rxitr_idx = AVF_ITR_INDEX_DEFAULT;
+ vecmap->vector_id = vf->msix_base + i;
+ vecmap->txq_map = 0;
+ vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+ }
+
+ args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ args.in_args = (u8 *)map_info;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
+
+ rte_free(map_info);
+ return err;
+}
+
+void
+avf_add_del_all_mac_addr(struct avf_adapter *adapter, bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct ether_addr *addr;
+ struct avf_cmd_info args;
+ int len, err, i, j;
+ int next_begin = 0;
+ int begin = 0;
+
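+	/* Addresses are sent in batches: each pass packs as many entries as
+	 * fit in one AVF_AQ_BUF_SZ-sized message and resumes at next_begin.
+	 */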
+ do {
+ j = 0;
+ len = sizeof(struct virtchnl_ether_addr_list);
+ for (i = begin; i < AVF_NUM_MACADDR_MAX; i++, next_begin++) {
+ addr = &adapter->eth_dev->data->mac_addrs[i];
+ if (is_zero_ether_addr(addr))
+ continue;
+ len += sizeof(struct virtchnl_ether_addr);
+ if (len >= AVF_AQ_BUF_SZ) {
+ next_begin = i + 1;
+ break;
+ }
+ }
+
+ list = rte_zmalloc("avf_del_mac_buffer", len, 0);
+ if (!list) {
+ PMD_DRV_LOG(ERR, "fail to allocate memory");
+ return;
+ }
+
+ for (i = begin; i < next_begin; i++) {
+ addr = &adapter->eth_dev->data->mac_addrs[i];
+ if (is_zero_ether_addr(addr))
+ continue;
+ rte_memcpy(list->list[j].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+ PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
+ j++;
+ }
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = j;
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR :
+ VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = (uint8_t *)list;
+ args.in_args_size = len;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+			PMD_DRV_LOG(ERR, "fail to execute command %s",
+				    add ? "OP_ADD_ETH_ADDR" :
+				    "OP_DEL_ETH_ADDR");
+ rte_free(list);
+ begin = next_begin;
+ } while (begin < AVF_NUM_MACADDR_MAX);
+}
+
+int
+avf_query_stats(struct avf_adapter *adapter,
+ struct virtchnl_eth_stats **pstats)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_queue_select q_stats;
+ struct avf_cmd_info args;
+ int err;
+
+ memset(&q_stats, 0, sizeof(q_stats));
+ q_stats.vsi_id = vf->vsi_res->vsi_id;
+ args.ops = VIRTCHNL_OP_GET_STATS;
+ args.in_args = (uint8_t *)&q_stats;
+ args.in_args_size = sizeof(q_stats);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
+ *pstats = NULL;
+ return err;
+ }
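+	/* Note: the returned stats point into vf->aq_resp, so they stay
+	 * valid only until the next command reuses that response buffer.
+	 */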
+ *pstats = (struct virtchnl_eth_stats *)args.out_buffer;
+ return 0;
+}
+
+int
+avf_config_promisc(struct avf_adapter *adapter,
+ bool enable_unicast,
+ bool enable_multicast)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_promisc_info promisc;
+ struct avf_cmd_info args;
+ int err;
+
+ promisc.flags = 0;
+ promisc.vsi_id = vf->vsi_res->vsi_id;
+
+ if (enable_unicast)
+ promisc.flags |= FLAG_VF_UNICAST_PROMISC;
+
+ if (enable_multicast)
+ promisc.flags |= FLAG_VF_MULTICAST_PROMISC;
+
+ args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ args.in_args = (uint8_t *)&promisc;
+ args.in_args_size = sizeof(promisc);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+
+ if (err)
+ PMD_DRV_LOG(ERR,
+ "fail to execute command CONFIG_PROMISCUOUS_MODE");
+ return err;
+}
+
+int
+avf_add_del_eth_addr(struct avf_adapter *adapter, struct ether_addr *addr,
+ bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
+ sizeof(struct virtchnl_ether_addr)];
+ struct avf_cmd_info args;
+ int err;
+
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = 1;
+ rte_memcpy(list->list[0].addr, addr->addr_bytes,
+ sizeof(addr->addr_bytes));
+
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR");
+ return err;
+}
+
+int
+avf_add_del_vlan(struct avf_adapter *adapter, uint16_t vlanid, bool add)
+{
+ struct virtchnl_vlan_filter_list *vlan_list;
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) +
+ sizeof(uint16_t)];
+ struct avf_cmd_info args;
+ int err;
+
+ vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer;
+ vlan_list->vsi_id = vf->vsi_res->vsi_id;
+ vlan_list->num_elements = 1;
+ vlan_list->vlan_id[0] = vlanid;
+
+ args.ops = add ? VIRTCHNL_OP_ADD_VLAN : VIRTCHNL_OP_DEL_VLAN;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(cmd_buffer);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_VLAN" : "OP_DEL_VLAN");
+
+ return err;
+}
diff --git a/drivers/net/avf/base/README b/drivers/net/avf/base/README
new file mode 100644
index 00000000..4710ae27
--- /dev/null
+++ b/drivers/net/avf/base/README
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+Intel® AVF driver
+=================
+
+This directory contains the source code of the FreeBSD AVF driver,
+version cid-avf.2018.01.02.tar.gz, released by the team that develops
+the base drivers for AVF NICs. The base/ directory contains the
+original source package.
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ avf_osdep.h
diff --git a/drivers/net/avf/base/avf_adminq.c b/drivers/net/avf/base/avf_adminq.c
new file mode 100644
index 00000000..616e2a9c
--- /dev/null
+++ b/drivers/net/avf/base/avf_adminq.c
@@ -0,0 +1,1010 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "avf_status.h"
+#include "avf_type.h"
+#include "avf_register.h"
+#include "avf_adminq.h"
+#include "avf_prototype.h"
+
+/**
+ * avf_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+STATIC void avf_adminq_init_regs(struct avf_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (avf_is_vf(hw)) {
+ hw->aq.asq.tail = AVF_ATQT1;
+ hw->aq.asq.head = AVF_ATQH1;
+ hw->aq.asq.len = AVF_ATQLEN1;
+ hw->aq.asq.bal = AVF_ATQBAL1;
+ hw->aq.asq.bah = AVF_ATQBAH1;
+ hw->aq.arq.tail = AVF_ARQT1;
+ hw->aq.arq.head = AVF_ARQH1;
+ hw->aq.arq.len = AVF_ARQLEN1;
+ hw->aq.arq.bal = AVF_ARQBAL1;
+ hw->aq.arq.bah = AVF_ARQBAH1;
+ }
+}
+
+/**
+ * avf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+enum avf_status_code avf_alloc_adminq_asq_ring(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code;
+
+ ret_code = avf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ avf_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct avf_aq_desc)),
+ AVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = avf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct avf_asq_cmd_details)));
+ if (ret_code) {
+ avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * avf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+enum avf_status_code avf_alloc_adminq_arq_ring(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code;
+
+ ret_code = avf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ avf_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct avf_aq_desc)),
+ AVF_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * avf_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+void avf_free_adminq_asq(struct avf_hw *hw)
+{
+ avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * avf_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+void avf_free_adminq_arq(struct avf_hw *hw)
+{
+ avf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * avf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum avf_status_code avf_alloc_arq_bufs(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code;
+ struct avf_aq_desc *desc;
+ struct avf_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = avf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct avf_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct avf_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = avf_allocate_dma_mem(hw, bi,
+ avf_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ AVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = AVF_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > AVF_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(AVF_AQ_FLAG_LB);
+ desc->opcode = 0;
+		/* This is in accordance with the Admin queue design; there
+		 * is no register for buffer size configuration.
+		 */
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ CPU_TO_LE32(AVF_HI_DWORD(bi->pa));
+ desc->params.external.addr_low =
+ CPU_TO_LE32(AVF_LO_DWORD(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ avf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ avf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * avf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+STATIC enum avf_status_code avf_alloc_asq_bufs(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code;
+ struct avf_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = avf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct avf_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct avf_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = avf_allocate_dma_mem(hw, bi,
+ avf_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ AVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ avf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ avf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * avf_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void avf_free_arq_bufs(struct avf_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ avf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ avf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ avf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * avf_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void avf_free_asq_bufs(struct avf_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ avf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ avf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ avf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ avf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * avf_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+STATIC enum avf_status_code avf_config_asq_regs(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+#ifdef INTEGRATED_VF
+ if (avf_is_vf(hw))
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ AVF_ATQLEN1_ATQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ AVF_ATQLEN1_ATQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+ wr32(hw, hw->aq.asq.bal, AVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, AVF_HI_DWORD(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != AVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
+ ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * avf_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+STATIC enum avf_status_code avf_config_arq_regs(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+#ifdef INTEGRATED_VF
+ if (avf_is_vf(hw))
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ AVF_ARQLEN1_ARQENABLE_MASK));
+#else
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ AVF_ARQLEN1_ARQENABLE_MASK));
+#endif /* INTEGRATED_VF */
+ wr32(hw, hw->aq.arq.bal, AVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, AVF_HI_DWORD(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != AVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
+ ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * avf_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum avf_status_code avf_init_asq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = AVF_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = AVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = avf_alloc_adminq_asq_ring(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = avf_alloc_asq_bufs(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = avf_config_asq_regs(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ avf_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * avf_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum avf_status_code avf_init_arq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = AVF_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = AVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = avf_alloc_adminq_arq_ring(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = avf_alloc_arq_bufs(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = avf_config_arq_regs(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ avf_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * avf_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+enum avf_status_code avf_shutdown_asq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+
+ avf_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = AVF_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ avf_free_asq_bufs(hw);
+
+shutdown_asq_out:
+ avf_release_spinlock(&hw->aq.asq_spinlock);
+ return ret_code;
+}
+
+/**
+ * avf_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+enum avf_status_code avf_shutdown_arq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+
+ avf_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = AVF_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ avf_free_arq_bufs(hw);
+
+shutdown_arq_out:
+ avf_release_spinlock(&hw->aq.arq_spinlock);
+ return ret_code;
+}
+
+/**
+ * avf_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+enum avf_status_code avf_init_adminq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = AVF_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+ avf_init_spinlock(&hw->aq.asq_spinlock);
+ avf_init_spinlock(&hw->aq.arq_spinlock);
+
+ /* Set up register offsets */
+ avf_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = AVF_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = avf_init_asq(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_destroy_spinlocks;
+
+ /* allocate the ARQ */
+ ret_code = avf_init_arq(hw);
+ if (ret_code != AVF_SUCCESS)
+ goto init_adminq_free_asq;
+
+ ret_code = AVF_SUCCESS;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_asq:
+ avf_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+ avf_destroy_spinlock(&hw->aq.asq_spinlock);
+ avf_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+ return ret_code;
+}
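+
+/* Illustrative sketch (not part of the driver): callers size both queues
+ * before initialization; the depth and buffer-size values here are
+ * assumed, driver-chosen constants.
+ *
+ *	hw->aq.num_asq_entries = 32;
+ *	hw->aq.num_arq_entries = 32;
+ *	hw->aq.asq_buf_size = 4096;
+ *	hw->aq.arq_buf_size = 4096;
+ *	if (avf_init_adminq(hw) != AVF_SUCCESS)
+ *		return -1;
+ */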
+
+/**
+ * avf_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+enum avf_status_code avf_shutdown_adminq(struct avf_hw *hw)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+
+ if (avf_check_asq_alive(hw))
+ avf_aq_queue_shutdown(hw, true);
+
+ avf_shutdown_asq(hw);
+ avf_shutdown_arq(hw);
+ avf_destroy_spinlock(&hw->aq.asq_spinlock);
+ avf_destroy_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->nvm_buff.va)
+ avf_free_virt_mem(hw, &hw->nvm_buff);
+
+ return ret_code;
+}
+
+/**
+ * avf_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+u16 avf_clean_asq(struct avf_hw *hw)
+{
+ struct avf_adminq_ring *asq = &(hw->aq.asq);
+ struct avf_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct avf_aq_desc desc_cb;
+ struct avf_aq_desc *desc;
+
+ desc = AVF_ADMINQ_DESC(*asq, ntc);
+ details = AVF_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+ if (details->callback) {
+ AVF_ADMINQ_CALLBACK cb_func =
+ (AVF_ADMINQ_CALLBACK)details->callback;
+ avf_memcpy(&desc_cb, desc, sizeof(struct avf_aq_desc),
+ AVF_DMA_TO_DMA);
+ cb_func(hw, &desc_cb);
+ }
+ avf_memset(desc, 0, sizeof(*desc), AVF_DMA_MEM);
+ avf_memset(details, 0, sizeof(*details), AVF_NONDMA_MEM);
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = AVF_ADMINQ_DESC(*asq, ntc);
+ details = AVF_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return AVF_DESC_UNUSED(asq);
+}
+
+/**
+ * avf_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool avf_asq_done(struct avf_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ * avf_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc.
+ **/
+enum avf_status_code avf_asq_send_command(struct avf_hw *hw,
+ struct avf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+ struct avf_dma_mem *dma_buff = NULL;
+ struct avf_asq_cmd_details *details;
+ struct avf_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+ u32 val = 0;
+
+ avf_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ hw->aq.asq_last_status = AVF_AQ_RC_OK;
+
+ if (hw->aq.asq.count == 0) {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = AVF_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = AVF_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ details = AVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ avf_memcpy(details,
+ cmd_details,
+ sizeof(struct avf_asq_cmd_details),
+ AVF_NONDMA_TO_NONDMA);
+
+ /* If the cmd_details are defined copy the cookie. The
+ * CPU_TO_LE32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ CPU_TO_LE32(AVF_HI_DWORD(details->cookie));
+ desc->cookie_low =
+ CPU_TO_LE32(AVF_LO_DWORD(details->cookie));
+ }
+ } else {
+ avf_memset(details, 0,
+ sizeof(struct avf_asq_cmd_details),
+ AVF_NONDMA_MEM);
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+ desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ avf_debug(hw,
+ AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = AVF_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ avf_debug(hw,
+ AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = AVF_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (avf_clean_asq(hw) == 0) {
+ avf_debug(hw,
+ AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = AVF_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = AVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ avf_memcpy(desc_on_ring, desc, sizeof(struct avf_aq_desc),
+ AVF_NONDMA_TO_DMA);
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ avf_memcpy(dma_buff->va, buff, buff_size,
+ AVF_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ CPU_TO_LE32(AVF_HI_DWORD(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ CPU_TO_LE32(AVF_LO_DWORD(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (avf_asq_done(hw))
+ break;
+ avf_usec_delay(50);
+ total_delay += 50;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (avf_asq_done(hw)) {
+ avf_memcpy(desc, desc_on_ring, sizeof(struct avf_aq_desc),
+ AVF_DMA_TO_NONDMA);
+ if (buff != NULL)
+ avf_memcpy(buff, dma_buff->va, buff_size,
+ AVF_DMA_TO_NONDMA);
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval != 0) {
+ avf_debug(hw,
+ AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum avf_admin_queue_err)retval == AVF_AQ_RC_OK)
+ status = AVF_SUCCESS;
+ else
+ status = AVF_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum avf_admin_queue_err)retval;
+ }
+
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: desc and buffer writeback:\n");
+ avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ avf_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(struct avf_aq_desc), AVF_DMA_TO_NONDMA);
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ if (rd32(hw, hw->aq.asq.len) & AVF_ATQLEN1_ATQCRIT_MASK) {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = AVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = AVF_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+ }
+
+asq_send_command_error:
+ avf_release_spinlock(&hw->aq.asq_spinlock);
+ return status;
+}
+
+/**
+ * avf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void avf_fill_default_direct_cmd_desc(struct avf_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ avf_memset((void *)desc, 0, sizeof(struct avf_aq_desc),
+ AVF_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_SI);
+}
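+
+/* Illustrative sketch (not part of the driver): a direct (buffer-less)
+ * command is built with the helper above and handed to the send routine.
+ *
+ *	struct avf_aq_desc desc;
+ *	enum avf_status_code status;
+ *
+ *	avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_queue_shutdown);
+ *	status = avf_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */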
+
+/**
+ * avf_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+enum avf_status_code avf_clean_arq_element(struct avf_hw *hw,
+ struct avf_arq_event_info *e,
+ u16 *pending)
+{
+ enum avf_status_code ret_code = AVF_SUCCESS;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct avf_aq_desc *desc;
+ struct avf_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ avf_memset(&e->desc, 0, sizeof(e->desc), AVF_NONDMA_MEM);
+
+ /* take the lock before we start messing with the ring */
+ avf_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ if (hw->aq.arq.count == 0) {
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = AVF_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
+ /* set next_to_use to head */
+#ifdef INTEGRATED_VF
+ if (!avf_is_vf(hw))
+ ntu = rd32(hw, hw->aq.arq.head) & AVF_PF_ARQH_ARQH_MASK;
+ else
+ ntu = rd32(hw, hw->aq.arq.head) & AVF_ARQH1_ARQH_MASK;
+#else
+ ntu = rd32(hw, hw->aq.arq.head) & AVF_ARQH1_ARQH_MASK;
+#endif /* INTEGRATED_VF */
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = AVF_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = AVF_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ hw->aq.arq_last_status =
+ (enum avf_admin_queue_err)LE16_TO_CPU(desc->retval);
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & AVF_AQ_FLAG_ERR) {
+ ret_code = AVF_ERR_ADMIN_QUEUE_ERROR;
+ avf_debug(hw,
+ AVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ avf_memcpy(&e->desc, desc, sizeof(struct avf_aq_desc),
+ AVF_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
+ avf_memcpy(e->msg_buf,
+ hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len, AVF_DMA_TO_NONDMA);
+
+ avf_debug(hw, AVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ avf_debug_aq(hw, AVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ avf_memset((void *)desc, 0, sizeof(struct avf_aq_desc), AVF_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > AVF_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(AVF_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->params.external.addr_high = CPU_TO_LE32(AVF_HI_DWORD(bi->pa));
+ desc->params.external.addr_low = CPU_TO_LE32(AVF_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
+ avf_release_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
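+
+/* Illustrative sketch (not part of the driver): draining pending events
+ * with a caller-supplied message buffer "buf" of size "sz" (both assumed).
+ *
+ *	struct avf_arq_event_info event;
+ *	u16 pending;
+ *
+ *	event.buf_len = sz;
+ *	event.msg_buf = buf;
+ *	do {
+ *		if (avf_clean_arq_element(hw, &event, &pending))
+ *			break;	// includes AVF_ERR_ADMIN_QUEUE_NO_WORK
+ *	} while (pending);
+ */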
+
diff --git a/drivers/net/avf/base/avf_adminq.h b/drivers/net/avf/base/avf_adminq.h
new file mode 100644
index 00000000..d7d242a9
--- /dev/null
+++ b/drivers/net/avf/base/avf_adminq.h
@@ -0,0 +1,166 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_ADMINQ_H_
+#define _AVF_ADMINQ_H_
+
+#include "avf_osdep.h"
+#include "avf_status.h"
+#include "avf_adminq_cmd.h"
+
+#define AVF_ADMINQ_DESC(R, i) \
+ (&(((struct avf_aq_desc *)((R).desc_buf.va))[i]))
+
+#define AVF_ADMINQ_DESC_ALIGNMENT 4096
+
+struct avf_adminq_ring {
+ struct avf_virt_mem dma_head; /* space for dma structures */
+ struct avf_dma_mem desc_buf; /* descriptor ring memory */
+ struct avf_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct avf_dma_mem *asq_bi;
+ struct avf_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct avf_asq_cmd_details {
+ void *callback; /* cast from type AVF_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+ struct avf_aq_desc *wb_desc;
+};
+
+#define AVF_ADMINQ_DETAILS(R, i) \
+ (&(((struct avf_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct avf_arq_event_info {
+ struct avf_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct avf_adminq_info {
+ struct avf_adminq_ring arq; /* receive queue */
+ struct avf_adminq_ring asq; /* send queue */
+	u32 asq_cmd_timeout; /* send queue cmd write back timeout */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct avf_spinlock asq_spinlock; /* Send queue spinlock */
+ struct avf_spinlock arq_spinlock; /* Receive queue spinlock */
+
+ /* last status values on send and receive queues */
+ enum avf_admin_queue_err asq_last_status;
+ enum avf_admin_queue_err arq_last_status;
+};
+
+/**
+ * avf_aq_rc_to_posix - convert errors to user-land codes
+ * aq_ret: AdminQ handler error code can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
+ **/
+STATIC INLINE int avf_aq_rc_to_posix(int aq_ret, int aq_rc)
+{
+ int aq_to_posix[] = {
+ 0, /* AVF_AQ_RC_OK */
+ -EPERM, /* AVF_AQ_RC_EPERM */
+ -ENOENT, /* AVF_AQ_RC_ENOENT */
+ -ESRCH, /* AVF_AQ_RC_ESRCH */
+ -EINTR, /* AVF_AQ_RC_EINTR */
+ -EIO, /* AVF_AQ_RC_EIO */
+ -ENXIO, /* AVF_AQ_RC_ENXIO */
+ -E2BIG, /* AVF_AQ_RC_E2BIG */
+ -EAGAIN, /* AVF_AQ_RC_EAGAIN */
+ -ENOMEM, /* AVF_AQ_RC_ENOMEM */
+ -EACCES, /* AVF_AQ_RC_EACCES */
+ -EFAULT, /* AVF_AQ_RC_EFAULT */
+ -EBUSY, /* AVF_AQ_RC_EBUSY */
+ -EEXIST, /* AVF_AQ_RC_EEXIST */
+ -EINVAL, /* AVF_AQ_RC_EINVAL */
+ -ENOTTY, /* AVF_AQ_RC_ENOTTY */
+ -ENOSPC, /* AVF_AQ_RC_ENOSPC */
+ -ENOSYS, /* AVF_AQ_RC_ENOSYS */
+ -ERANGE, /* AVF_AQ_RC_ERANGE */
+ -EPIPE, /* AVF_AQ_RC_EFLUSHED */
+ -ESPIPE, /* AVF_AQ_RC_BAD_ADDR */
+ -EROFS, /* AVF_AQ_RC_EMODE */
+ -EFBIG, /* AVF_AQ_RC_EFBIG */
+ };
+
+ /* aq_rc is invalid if AQ timed out */
+ if (aq_ret == AVF_ERR_ADMIN_QUEUE_TIMEOUT)
+ return -EAGAIN;
+
+ if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
+ return -ERANGE;
+
+ return aq_to_posix[aq_rc];
+}
+
+/* general information */
+#define AVF_AQ_LARGE_BUF 512
+#define AVF_ASQ_CMD_TIMEOUT 250000 /* usecs */
+
+void avf_fill_default_direct_cmd_desc(struct avf_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _AVF_ADMINQ_H_ */
diff --git a/drivers/net/avf/base/avf_adminq_cmd.h b/drivers/net/avf/base/avf_adminq_cmd.h
new file mode 100644
index 00000000..1709f317
--- /dev/null
+++ b/drivers/net/avf/base/avf_adminq_cmd.h
@@ -0,0 +1,2842 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_ADMINQ_CMD_H_
+#define _AVF_ADMINQ_CMD_H_
+
+/* This header file defines the avf Admin Queue commands and is shared between
+ * avf Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+
+#define AVF_FW_API_VERSION_MAJOR 0x0001
+#define AVF_FW_API_VERSION_MINOR_X722 0x0005
+#define AVF_FW_API_VERSION_MINOR_X710 0x0007
+
+#define AVF_FW_MINOR_VERSION(_h) ((_h)->mac.type == AVF_MAC_XL710 ? \
+ AVF_FW_API_VERSION_MINOR_X710 : \
+ AVF_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define AVF_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+
+struct avf_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define AVF_AQ_FLAG_DD_SHIFT 0
+#define AVF_AQ_FLAG_CMP_SHIFT 1
+#define AVF_AQ_FLAG_ERR_SHIFT 2
+#define AVF_AQ_FLAG_VFE_SHIFT 3
+#define AVF_AQ_FLAG_LB_SHIFT 9
+#define AVF_AQ_FLAG_RD_SHIFT 10
+#define AVF_AQ_FLAG_VFC_SHIFT 11
+#define AVF_AQ_FLAG_BUF_SHIFT 12
+#define AVF_AQ_FLAG_SI_SHIFT 13
+#define AVF_AQ_FLAG_EI_SHIFT 14
+#define AVF_AQ_FLAG_FE_SHIFT 15
+
+#define AVF_AQ_FLAG_DD (1 << AVF_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define AVF_AQ_FLAG_CMP (1 << AVF_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define AVF_AQ_FLAG_ERR (1 << AVF_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define AVF_AQ_FLAG_VFE (1 << AVF_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define AVF_AQ_FLAG_LB (1 << AVF_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define AVF_AQ_FLAG_RD (1 << AVF_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define AVF_AQ_FLAG_VFC (1 << AVF_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define AVF_AQ_FLAG_BUF (1 << AVF_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define AVF_AQ_FLAG_SI (1 << AVF_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define AVF_AQ_FLAG_EI (1 << AVF_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define AVF_AQ_FLAG_FE (1 << AVF_AQ_FLAG_FE_SHIFT) /* 0x8000 */
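+
+/* Illustrative sketch (not part of this header): an indirect command
+ * advertises its buffer with BUF (plus LB for buffers larger than
+ * AVF_AQ_LARGE_BUF), mirroring the descriptor setup in avf_adminq.c.
+ *
+ *	desc->flags = CPU_TO_LE16(AVF_AQ_FLAG_BUF);
+ *	if (buf_size > AVF_AQ_LARGE_BUF)
+ *		desc->flags |= CPU_TO_LE16(AVF_AQ_FLAG_LB);
+ *	desc->datalen = CPU_TO_LE16((u16)buf_size);
+ */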
+
+/* error codes */
+enum avf_admin_queue_err {
+ AVF_AQ_RC_OK = 0, /* success */
+ AVF_AQ_RC_EPERM = 1, /* Operation not permitted */
+ AVF_AQ_RC_ENOENT = 2, /* No such element */
+ AVF_AQ_RC_ESRCH = 3, /* Bad opcode */
+ AVF_AQ_RC_EINTR = 4, /* operation interrupted */
+ AVF_AQ_RC_EIO = 5, /* I/O error */
+ AVF_AQ_RC_ENXIO = 6, /* No such resource */
+ AVF_AQ_RC_E2BIG = 7, /* Arg too long */
+ AVF_AQ_RC_EAGAIN = 8, /* Try again */
+ AVF_AQ_RC_ENOMEM = 9, /* Out of memory */
+ AVF_AQ_RC_EACCES = 10, /* Permission denied */
+ AVF_AQ_RC_EFAULT = 11, /* Bad address */
+ AVF_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ AVF_AQ_RC_EEXIST = 13, /* object already exists */
+ AVF_AQ_RC_EINVAL = 14, /* Invalid argument */
+ AVF_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ AVF_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ AVF_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ AVF_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ AVF_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ AVF_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ AVF_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ AVF_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum avf_admin_queue_opc {
+ /* aq commands */
+ avf_aqc_opc_get_version = 0x0001,
+ avf_aqc_opc_driver_version = 0x0002,
+ avf_aqc_opc_queue_shutdown = 0x0003,
+ avf_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ avf_aqc_opc_request_resource = 0x0008,
+ avf_aqc_opc_release_resource = 0x0009,
+
+ avf_aqc_opc_list_func_capabilities = 0x000A,
+ avf_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* Proxy commands */
+ avf_aqc_opc_set_proxy_config = 0x0104,
+ avf_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+ /* LAA */
+ avf_aqc_opc_mac_address_read = 0x0107,
+ avf_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ avf_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* WoL commands */
+ avf_aqc_opc_set_wol_filter = 0x0120,
+ avf_aqc_opc_get_wake_reason = 0x0121,
+ avf_aqc_opc_clear_all_wol_filters = 0x025E,
+
+ /* internal switch commands */
+ avf_aqc_opc_get_switch_config = 0x0200,
+ avf_aqc_opc_add_statistics = 0x0201,
+ avf_aqc_opc_remove_statistics = 0x0202,
+ avf_aqc_opc_set_port_parameters = 0x0203,
+ avf_aqc_opc_get_switch_resource_alloc = 0x0204,
+ avf_aqc_opc_set_switch_config = 0x0205,
+ avf_aqc_opc_rx_ctl_reg_read = 0x0206,
+ avf_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ avf_aqc_opc_add_vsi = 0x0210,
+ avf_aqc_opc_update_vsi_parameters = 0x0211,
+ avf_aqc_opc_get_vsi_parameters = 0x0212,
+
+ avf_aqc_opc_add_pv = 0x0220,
+ avf_aqc_opc_update_pv_parameters = 0x0221,
+ avf_aqc_opc_get_pv_parameters = 0x0222,
+
+ avf_aqc_opc_add_veb = 0x0230,
+ avf_aqc_opc_update_veb_parameters = 0x0231,
+ avf_aqc_opc_get_veb_parameters = 0x0232,
+
+ avf_aqc_opc_delete_element = 0x0243,
+
+ avf_aqc_opc_add_macvlan = 0x0250,
+ avf_aqc_opc_remove_macvlan = 0x0251,
+ avf_aqc_opc_add_vlan = 0x0252,
+ avf_aqc_opc_remove_vlan = 0x0253,
+ avf_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ avf_aqc_opc_add_tag = 0x0255,
+ avf_aqc_opc_remove_tag = 0x0256,
+ avf_aqc_opc_add_multicast_etag = 0x0257,
+ avf_aqc_opc_remove_multicast_etag = 0x0258,
+ avf_aqc_opc_update_tag = 0x0259,
+ avf_aqc_opc_add_control_packet_filter = 0x025A,
+ avf_aqc_opc_remove_control_packet_filter = 0x025B,
+ avf_aqc_opc_add_cloud_filters = 0x025C,
+ avf_aqc_opc_remove_cloud_filters = 0x025D,
+ avf_aqc_opc_clear_wol_switch_filters = 0x025E,
+ avf_aqc_opc_replace_cloud_filters = 0x025F,
+
+ avf_aqc_opc_add_mirror_rule = 0x0260,
+ avf_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* Dynamic Device Personalization */
+ avf_aqc_opc_write_personalization_profile = 0x0270,
+ avf_aqc_opc_get_personalization_profile_list = 0x0271,
+
+ /* DCB commands */
+ avf_aqc_opc_dcb_ignore_pfc = 0x0301,
+ avf_aqc_opc_dcb_updated = 0x0302,
+ avf_aqc_opc_set_dcb_parameters = 0x0303,
+
+ /* TX scheduler */
+ avf_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ avf_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ avf_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ avf_aqc_opc_query_vsi_bw_config = 0x0408,
+ avf_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ avf_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ avf_aqc_opc_enable_switching_comp_ets = 0x0413,
+ avf_aqc_opc_modify_switching_comp_ets = 0x0414,
+ avf_aqc_opc_disable_switching_comp_ets = 0x0415,
+ avf_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ avf_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ avf_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ avf_aqc_opc_query_port_ets_config = 0x0419,
+ avf_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ avf_aqc_opc_suspend_port_tx = 0x041B,
+ avf_aqc_opc_resume_port_tx = 0x041C,
+ avf_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ avf_aqc_opc_query_hmc_resource_profile = 0x0500,
+ avf_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+	/* phy commands */
+ avf_aqc_opc_get_phy_abilities = 0x0600,
+ avf_aqc_opc_set_phy_config = 0x0601,
+ avf_aqc_opc_set_mac_config = 0x0603,
+ avf_aqc_opc_set_link_restart_an = 0x0605,
+ avf_aqc_opc_get_link_status = 0x0607,
+ avf_aqc_opc_set_phy_int_mask = 0x0613,
+ avf_aqc_opc_get_local_advt_reg = 0x0614,
+ avf_aqc_opc_set_local_advt_reg = 0x0615,
+ avf_aqc_opc_get_partner_advt = 0x0616,
+ avf_aqc_opc_set_lb_modes = 0x0618,
+ avf_aqc_opc_get_phy_wol_caps = 0x0621,
+ avf_aqc_opc_set_phy_debug = 0x0622,
+ avf_aqc_opc_upload_ext_phy_fm = 0x0625,
+ avf_aqc_opc_run_phy_activity = 0x0626,
+ avf_aqc_opc_set_phy_register = 0x0628,
+ avf_aqc_opc_get_phy_register = 0x0629,
+
+ /* NVM commands */
+ avf_aqc_opc_nvm_read = 0x0701,
+ avf_aqc_opc_nvm_erase = 0x0702,
+ avf_aqc_opc_nvm_update = 0x0703,
+ avf_aqc_opc_nvm_config_read = 0x0704,
+ avf_aqc_opc_nvm_config_write = 0x0705,
+ avf_aqc_opc_nvm_progress = 0x0706,
+ avf_aqc_opc_oem_post_update = 0x0720,
+ avf_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ avf_aqc_opc_send_msg_to_pf = 0x0801,
+ avf_aqc_opc_send_msg_to_vf = 0x0802,
+ avf_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ avf_aqc_opc_alternate_write = 0x0900,
+ avf_aqc_opc_alternate_write_indirect = 0x0901,
+ avf_aqc_opc_alternate_read = 0x0902,
+ avf_aqc_opc_alternate_read_indirect = 0x0903,
+ avf_aqc_opc_alternate_write_done = 0x0904,
+ avf_aqc_opc_alternate_set_mode = 0x0905,
+ avf_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ avf_aqc_opc_lldp_get_mib = 0x0A00,
+ avf_aqc_opc_lldp_update_mib = 0x0A01,
+ avf_aqc_opc_lldp_add_tlv = 0x0A02,
+ avf_aqc_opc_lldp_update_tlv = 0x0A03,
+ avf_aqc_opc_lldp_delete_tlv = 0x0A04,
+ avf_aqc_opc_lldp_stop = 0x0A05,
+ avf_aqc_opc_lldp_start = 0x0A06,
+ avf_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ avf_aqc_opc_lldp_set_local_mib = 0x0A08,
+ avf_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+
+ /* Tunnel commands */
+ avf_aqc_opc_add_udp_tunnel = 0x0B00,
+ avf_aqc_opc_del_udp_tunnel = 0x0B01,
+ avf_aqc_opc_set_rss_key = 0x0B02,
+ avf_aqc_opc_set_rss_lut = 0x0B03,
+ avf_aqc_opc_get_rss_key = 0x0B04,
+ avf_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Async Events */
+ avf_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ avf_aqc_opc_oem_parameter_change = 0xFE00,
+ avf_aqc_opc_oem_device_status_change = 0xFE01,
+ avf_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ avf_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ avf_aqc_opc_debug_read_reg = 0xFF03,
+ avf_aqc_opc_debug_write_reg = 0xFF04,
+ avf_aqc_opc_debug_modify_reg = 0xFF07,
+ avf_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length: it triggers a divide-by-zero error
+ * when the structure is the wrong size, and otherwise creates an enum
+ * that is never used.
+ */
+#define AVF_CHECK_STRUCT_LEN(n, X) enum avf_static_assert_enum_##X \
+ { avf_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define AVF_CHECK_CMD_LENGTH(X) AVF_CHECK_STRUCT_LEN(16, X)
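+
+/* Illustrative expansion (struct name "foo" is hypothetical): for a direct
+ * command, AVF_CHECK_CMD_LENGTH(foo) becomes
+ *
+ *	enum avf_static_assert_enum_foo {
+ *		avf_static_assert_foo =
+ *			(16)/((sizeof(struct foo) == (16)) ? 1 : 0)
+ *	};
+ *
+ * so a wrongly sized struct makes the divisor 0 and the constant expression
+ * fails to compile, while a correctly sized one only leaves behind an
+ * unused enum.
+ */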
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct avf_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct avf_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct avf_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define AVF_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_queue_shutdown);
+
+/* Set PF context (0x0004, direct) */
+struct avf_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_pf_context);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define AVF_AQ_RESOURCE_NVM 1
+#define AVF_AQ_RESOURCE_SDP 2
+#define AVF_AQ_RESOURCE_ACCESS_READ 1
+#define AVF_AQ_RESOURCE_ACCESS_WRITE 2
+#define AVF_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define AVF_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct avf_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_request_resource);
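+
+/* Sketch (illustrative only, not part of this API): acquiring the NVM
+ * resource for reading would fill the descriptor roughly as
+ *
+ *	struct avf_aqc_request_resource cmd = {0};
+ *
+ *	cmd.resource_id = CPU_TO_LE16(AVF_AQ_RESOURCE_NVM);
+ *	cmd.access_type = CPU_TO_LE16(AVF_AQ_RESOURCE_ACCESS_READ);
+ *
+ * assuming the driver's CPU_TO_LE16 byte-order helper; on success FW is
+ * expected to write the granted timeout (bounded by
+ * AVF_AQ_RESOURCE_NVM_READ_TIMEOUT ms) back into the timeout field.
+ */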
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct avf_aqc_list_capabilites {
+ u8 command_flags;
+#define AVF_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_list_capabilites);
+
+struct avf_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define AVF_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define AVF_AQ_CAP_ID_MNG_MODE 0x0002
+#define AVF_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define AVF_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define AVF_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define AVF_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define AVF_AQ_CAP_ID_WOL_AND_PROXY 0x0008
+#define AVF_AQ_CAP_ID_SRIOV 0x0012
+#define AVF_AQ_CAP_ID_VF 0x0013
+#define AVF_AQ_CAP_ID_VMDQ 0x0014
+#define AVF_AQ_CAP_ID_8021QBG 0x0015
+#define AVF_AQ_CAP_ID_8021QBR 0x0016
+#define AVF_AQ_CAP_ID_VSI 0x0017
+#define AVF_AQ_CAP_ID_DCB 0x0018
+#define AVF_AQ_CAP_ID_FCOE 0x0021
+#define AVF_AQ_CAP_ID_ISCSI 0x0022
+#define AVF_AQ_CAP_ID_RSS 0x0040
+#define AVF_AQ_CAP_ID_RXQ 0x0041
+#define AVF_AQ_CAP_ID_TXQ 0x0042
+#define AVF_AQ_CAP_ID_MSIX 0x0043
+#define AVF_AQ_CAP_ID_VF_MSIX 0x0044
+#define AVF_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define AVF_AQ_CAP_ID_1588 0x0046
+#define AVF_AQ_CAP_ID_IWARP 0x0051
+#define AVF_AQ_CAP_ID_LED 0x0061
+#define AVF_AQ_CAP_ID_SDP 0x0062
+#define AVF_AQ_CAP_ID_MDIO 0x0063
+#define AVF_AQ_CAP_ID_WSR_PROT 0x0064
+#define AVF_AQ_CAP_ID_NVM_MGMT 0x0080
+#define AVF_AQ_CAP_ID_FLEX10 0x00F1
+#define AVF_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct avf_aqc_cppm_configuration {
+ __le16 command_flags;
+#define AVF_AQ_CPPM_EN_LTRC 0x0800
+#define AVF_AQ_CPPM_EN_DMCTH 0x1000
+#define AVF_AQ_CPPM_EN_DMCTLX 0x2000
+#define AVF_AQ_CPPM_EN_HPTC 0x4000
+#define AVF_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct avf_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define AVF_AQ_ARP_INIT_IPV4 0x0800
+#define AVF_AQ_ARP_UNSUP_CTL 0x1000
+#define AVF_AQ_ARP_ENA 0x2000
+#define AVF_AQ_ARP_ADD_IPV4 0x4000
+#define AVF_AQ_ARP_DEL_IPV4 0x8000
+ __le16 table_id;
+ __le32 enabled_offloads;
+#define AVF_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define AVF_AQ_ARP_OFFLOAD_ENABLE 0x00000800
+ __le32 ip_addr;
+ u8 mac_addr[6];
+ u8 reserved[2];
+};
+
+AVF_CHECK_STRUCT_LEN(0x14, avf_aqc_arp_proxy_data);
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct avf_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define AVF_AQ_NS_PROXY_ADD_0 0x0001
+#define AVF_AQ_NS_PROXY_DEL_0 0x0002
+#define AVF_AQ_NS_PROXY_ADD_1 0x0004
+#define AVF_AQ_NS_PROXY_DEL_1 0x0008
+#define AVF_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define AVF_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define AVF_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define AVF_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define AVF_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define AVF_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define AVF_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define AVF_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define AVF_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+AVF_CHECK_STRUCT_LEN(0x3c, avf_aqc_ns_proxy_data);
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct avf_aqc_mng_laa {
+ __le16 command_flags;
+#define AVF_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_mng_laa);
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct avf_aqc_mac_address_read {
+ __le16 command_flags;
+#define AVF_AQC_LAN_ADDR_VALID 0x10
+#define AVF_AQC_SAN_ADDR_VALID 0x20
+#define AVF_AQC_PORT_ADDR_VALID 0x40
+#define AVF_AQC_WOL_ADDR_VALID 0x80
+#define AVF_AQC_MC_MAG_EN_VALID 0x100
+#define AVF_AQC_WOL_PRESERVE_STATUS 0x200
+#define AVF_AQC_ADDR_VALID_MASK 0x3F0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_mac_address_read);
+
+struct avf_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+AVF_CHECK_STRUCT_LEN(24, avf_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct avf_aqc_mac_address_write {
+ __le16 command_flags;
+#define AVF_AQC_MC_MAG_EN 0x0100
+#define AVF_AQC_WOL_PRESERVE_ON_PFR 0x0200
+#define AVF_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define AVF_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define AVF_AQC_WRITE_TYPE_PORT 0x8000
+#define AVF_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define AVF_AQC_WRITE_TYPE_MASK 0xC000
+
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct avf_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_clear_pxe);
+
+/* Set WoL Filter (0x0120) */
+
+struct avf_aqc_set_wol_filter {
+ __le16 filter_index;
+#define AVF_AQC_MAX_NUM_WOL_FILTERS 8
+#define AVF_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define AVF_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ AVF_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define AVF_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define AVF_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ AVF_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define AVF_AQC_SET_WOL_FILTER 0x8000
+#define AVF_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define AVF_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
+#define AVF_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define AVF_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define AVF_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define AVF_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_wol_filter);
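+
+/* Illustrative encoding (idx is a hypothetical filter slot): selecting WoL
+ * filter idx and marking it as a magic-packet filter packs both into
+ * filter_index:
+ *
+ *	cmd.filter_index =
+ *		CPU_TO_LE16(((idx << AVF_AQC_SET_WOL_FILTER_INDEX_SHIFT) &
+ *			     AVF_AQC_SET_WOL_FILTER_INDEX_MASK) |
+ *			    AVF_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK);
+ *
+ * with idx < AVF_AQC_MAX_NUM_WOL_FILTERS.
+ */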
+
+struct avf_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+AVF_CHECK_STRUCT_LEN(0x90, avf_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct avf_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ AVF_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_wake_reason_completion);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an SEID and a buffer in the
+ * command
+ */
+struct avf_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses avf_aqc_switch_seid for the descriptor
+ */
+struct avf_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_switch_config_header_resp);
+
+struct avf_aqc_switch_config_element_resp {
+ u8 element_type;
+#define AVF_AQ_SW_ELEM_TYPE_MAC 1
+#define AVF_AQ_SW_ELEM_TYPE_PF 2
+#define AVF_AQ_SW_ELEM_TYPE_VF 3
+#define AVF_AQ_SW_ELEM_TYPE_EMP 4
+#define AVF_AQ_SW_ELEM_TYPE_BMC 5
+#define AVF_AQ_SW_ELEM_TYPE_PV 16
+#define AVF_AQ_SW_ELEM_TYPE_VEB 17
+#define AVF_AQ_SW_ELEM_TYPE_PA 18
+#define AVF_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define AVF_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define AVF_AQ_CONN_TYPE_REGULAR 0x1
+#define AVF_AQ_CONN_TYPE_DEFAULT 0x2
+#define AVF_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+AVF_CHECK_STRUCT_LEN(0x10, avf_aqc_switch_config_element_resp);
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer;
+ * the first entry in the array is the header, the remainder are elements
+ */
+struct avf_aqc_get_switch_config_resp {
+ struct avf_aqc_get_switch_config_header_resp header;
+ struct avf_aqc_switch_config_element_resp element[1];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_get_switch_config_resp);
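+
+/* Sketch of consuming the response (LE16_TO_CPU and handle_element are
+ * assumed helpers): element[] is declared with a single entry, but the
+ * buffer holds header.num_reported of them back to back:
+ *
+ *	struct avf_aqc_get_switch_config_resp *resp = buf;
+ *	u16 i, n = LE16_TO_CPU(resp->header.num_reported);
+ *
+ *	for (i = 0; i < n; i++)
+ *		handle_element(&resp->element[i]);
+ */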
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct avf_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct avf_aqc_set_port_parameters {
+ __le16 command_flags;
+#define AVF_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define AVF_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define AVF_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+#define AVF_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define AVF_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct avf_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct avf_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define AVF_AQ_RESOURCE_TYPE_VEB 0x0
+#define AVF_AQ_RESOURCE_TYPE_VSI 0x1
+#define AVF_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define AVF_AQ_RESOURCE_TYPE_STAG 0x3
+#define AVF_AQ_RESOURCE_TYPE_ETAG 0x4
+#define AVF_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define AVF_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define AVF_AQ_RESOURCE_TYPE_VLAN 0x7
+#define AVF_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define AVF_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define AVF_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define AVF_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define AVF_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define AVF_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define AVF_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define AVF_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define AVF_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define AVF_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define AVF_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+AVF_CHECK_STRUCT_LEN(0x10, avf_aqc_switch_resource_alloc_element_resp);
+
+/* Set Switch Configuration (direct 0x0205) */
+struct avf_aqc_set_switch_config {
+ __le16 flags;
+/* flags used for both fields below */
+#define AVF_AQ_SET_SWITCH_CFG_PROMISC 0x0001
+#define AVF_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
+#define AVF_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
+ __le16 valid_flags;
+ /* The ethertype in switch_tag is dropped on ingress and used
+ * internally by the switch. Set this to zero for the default
+ * of 0x88a8 (802.1ad). Should be zero for firmware API
+ * versions lower than 1.7.
+ */
+ __le16 switch_tag;
+ /* The ethertypes in first_tag and second_tag are used to
+ * match the outer and inner VLAN tags (respectively) when HW
+ * double VLAN tagging is enabled via the set port parameters
+ * AQ command. Otherwise these are both ignored. Set them to
+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
+ * for firmware API versions lower than 1.7.
+ */
+ __le16 first_tag;
+ __le16 second_tag;
+ u8 reserved[6];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_switch_config);
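+
+/* Illustrative use: on firmware API 1.7 and later a zero tag still selects
+ * the documented default, so requesting the default ethertypes explicitly
+ * is simply
+ *
+ *	cmd.switch_tag = CPU_TO_LE16(0);	// keeps 0x88a8 (802.1ad)
+ *	cmd.first_tag = CPU_TO_LE16(0);		// keeps 0x8100 (802.1Q)
+ *	cmd.second_tag = CPU_TO_LE16(0);	// keeps 0x8100 (802.1Q)
+ *
+ * while on older firmware these fields must be left at zero anyway.
+ */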
+
+/* Read Receive control registers (direct 0x0206)
+ * Write Receive control registers (direct 0x0207)
+ * used for accessing Rx control registers that can be
+ * slow and need special handling when under high Rx load
+ */
+struct avf_aqc_rx_ctl_reg_read_write {
+ __le32 reserved1;
+ __le32 address;
+ __le32 reserved2;
+ __le32 value;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_rx_ctl_reg_read_write);
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct avf_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct avf_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define AVF_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define AVF_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define AVF_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define AVF_AQ_VSI_TYPE_SHIFT 0x0
+#define AVF_AQ_VSI_TYPE_MASK (0x3 << AVF_AQ_VSI_TYPE_SHIFT)
+#define AVF_AQ_VSI_TYPE_VF 0x0
+#define AVF_AQ_VSI_TYPE_VMDQ2 0x1
+#define AVF_AQ_VSI_TYPE_PF 0x2
+#define AVF_AQ_VSI_TYPE_EMP_MNG 0x3
+#define AVF_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_get_update_vsi);
+
+struct avf_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_get_update_vsi_completion);
+
+struct avf_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define AVF_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define AVF_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define AVF_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define AVF_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define AVF_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define AVF_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define AVF_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define AVF_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define AVF_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define AVF_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define AVF_AQ_VSI_SW_ID_SHIFT 0x0000
+#define AVF_AQ_VSI_SW_ID_MASK (0xFFF << AVF_AQ_VSI_SW_ID_SHIFT)
+#define AVF_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define AVF_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define AVF_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define AVF_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define AVF_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define AVF_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define AVF_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define AVF_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ AVF_AQ_VSI_PVLAN_MODE_SHIFT)
+#define AVF_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define AVF_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define AVF_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define AVF_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define AVF_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define AVF_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ AVF_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define AVF_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define AVF_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define AVF_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define AVF_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define AVF_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define AVF_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define AVF_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define AVF_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define AVF_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define AVF_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define AVF_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define AVF_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define AVF_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define AVF_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ AVF_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define AVF_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define AVF_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ AVF_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define AVF_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define AVF_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define AVF_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define AVF_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define AVF_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define AVF_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define AVF_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define AVF_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define AVF_AQ_VSI_QUEUE_SHIFT 0x0
+#define AVF_AQ_VSI_QUEUE_MASK (0x7FF << AVF_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define AVF_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define AVF_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ AVF_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define AVF_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define AVF_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ AVF_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define AVF_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define AVF_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#define AVF_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define AVF_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define AVF_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define AVF_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define AVF_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+AVF_CHECK_STRUCT_LEN(128, avf_aqc_vsi_properties_data);
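+
+/* Illustrative sketch (qid is a hypothetical absolute queue id): each
+ * queue_mapping entry keeps the queue id in its low bits, and the matching
+ * valid_sections bit must be raised for FW to consume the section:
+ *
+ *	props.queue_mapping[0] =
+ *		CPU_TO_LE16((qid << AVF_AQ_VSI_QUEUE_SHIFT) &
+ *			    AVF_AQ_VSI_QUEUE_MASK);
+ *	props.valid_sections |=
+ *		CPU_TO_LE16(AVF_AQ_VSI_PROP_QUEUE_MAP_VALID);
+ */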
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for Update PV (direct 0x0221), but only the flags are used
+ * (IS_CTRL_PORT only works on Add PV)
+ */
+struct avf_aqc_add_update_pv {
+ __le16 command_flags;
+#define AVF_AQC_PV_FLAG_PV_TYPE 0x1
+#define AVF_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define AVF_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define AVF_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_update_pv);
+
+struct avf_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define AVF_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define AVF_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define AVF_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define AVF_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses avf_aqc_switch_seid for the descriptor
+ */
+
+struct avf_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define AVF_AQC_GET_PV_PV_TYPE 0x1
+#define AVF_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define AVF_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct avf_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define AVF_AQC_ADD_VEB_FLOATING 0x1
+#define AVF_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define AVF_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ AVF_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define AVF_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define AVF_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define AVF_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */
+#define AVF_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_veb);
+
+struct avf_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define AVF_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define AVF_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define AVF_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define AVF_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses avf_aqc_switch_seid for the descriptor
+ */
+struct avf_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic avf_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used as the command descriptor for most VLAN commands */
+struct avf_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define AVF_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define AVF_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_macvlan);
+
+/* indirect data for command and response */
+struct avf_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define AVF_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define AVF_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define AVF_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define AVF_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+#define AVF_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010
+ __le16 queue_number;
+#define AVF_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define AVF_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ AVF_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define AVF_AQC_MM_PERFECT_MATCH 0x01
+#define AVF_AQC_MM_HASH_MATCH 0x02
+#define AVF_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
+struct avf_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses avf_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses elements
+ */
+
+struct avf_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define AVF_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define AVF_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define AVF_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define AVF_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define AVF_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define AVF_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic avf_aqc_macvlan for the command
+ */
+struct avf_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define AVF_AQC_ADD_VLAN_LOCAL 0x1
+#define AVF_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define AVF_AQC_ADD_PVLAN_TYPE_MASK (0x3 << AVF_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define AVF_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define AVF_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define AVF_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define AVF_AQC_VLAN_PTYPE_SHIFT 3
+#define AVF_AQC_VLAN_PTYPE_MASK (0x3 << AVF_AQC_VLAN_PTYPE_SHIFT)
+#define AVF_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define AVF_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define AVF_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define AVF_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define AVF_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* result codes for add VLAN */
+#define AVF_AQC_ADD_VLAN_SUCCESS 0x0
+#define AVF_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define AVF_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* result codes for remove VLAN */
+#define AVF_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define AVF_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct avf_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct avf_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define AVF_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define AVF_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define AVF_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define AVF_AQC_SET_VSI_DEFAULT 0x08
+#define AVF_AQC_SET_VSI_PROMISC_VLAN 0x10
+#define AVF_AQC_SET_VSI_PROMISC_TX 0x8000
+ __le16 seid;
+#define AVF_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define AVF_AQC_SET_VSI_VLAN_MASK 0x0FFF
+#define AVF_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_vsi_promiscuous_modes);
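+
+/* Sketch: a bit only takes effect when set in valid_flags, with
+ * promiscuous_flags holding the new state for that bit. Enabling unicast
+ * promiscuous mode on a VSI (vsi_seid is hypothetical) would be
+ *
+ *	cmd.promiscuous_flags = CPU_TO_LE16(AVF_AQC_SET_VSI_PROMISC_UNICAST);
+ *	cmd.valid_flags = CPU_TO_LE16(AVF_AQC_SET_VSI_PROMISC_UNICAST);
+ *	cmd.seid = CPU_TO_LE16(vsi_seid & AVF_AQC_VSI_PROM_CMD_SEID_MASK);
+ */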
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic avf_aqc_add_remove_tag_completion for completion
+ */
+struct avf_aqc_add_tag {
+ __le16 flags;
+#define AVF_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define AVF_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_tag);
+
+struct avf_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic avf_aqc_add_remove_tag_completion for completion
+ */
+struct avf_aqc_remove_tag {
+ __le16 seid;
+#define AVF_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_remove_tag);
+
+/* Add multicast E-Tag (direct 0x0257)
+ * Delete multicast E-Tag (direct 0x0258) uses only the pv_seid and etag
+ * fields and no external data
+ */
+struct avf_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_mcast_etag);
+
+struct avf_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct avf_aqc_update_tag {
+ __le16 seid;
+#define AVF_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_update_tag);
+
+struct avf_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses avf_aqc_add_remove_control_packet_filter for the descriptor
+ * and the generic direct completion structure
+ */
+struct avf_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define AVF_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define AVF_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define AVF_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define AVF_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define AVF_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define AVF_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_control_packet_filter);
+
+struct avf_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the avf_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct avf_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define AVF_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define AVF_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ AVF_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 big_buffer_flag;
+#define AVF_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1
+ u8 reserved2[3];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_remove_cloud_filters);
+
+struct avf_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define AVF_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define AVF_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ AVF_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
+#define AVF_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define AVF_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define AVF_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define AVF_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define AVF_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define AVF_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define AVF_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define AVF_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+/* 0x0010 to 0x0017 are for custom filters */
+
+#define AVF_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define AVF_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define AVF_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define AVF_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define AVF_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4
+#define AVF_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5
+
+#define AVF_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000
+#define AVF_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000
+#define AVF_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define AVF_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define AVF_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
+ AVF_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define AVF_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define AVF_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
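+
+/* Illustrative flags composition: an inner-MAC + inner-VLAN match on an
+ * IPv4 VXLAN tunnel combines the filter type, address family and tunnel
+ * type subfields of 'flags':
+ *
+ *	elem.flags = CPU_TO_LE16(AVF_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN |
+ *				 AVF_AQC_ADD_CLOUD_FLAGS_IPV4 |
+ *				 (AVF_AQC_ADD_CLOUD_TNL_TYPE_VXLAN <<
+ *				  AVF_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+ */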
+
+/* avf_aqc_add_rm_cloud_filt_elem_ext is used when the
+ * AVF_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set; refer to DCR288
+ */
+struct avf_aqc_add_rm_cloud_filt_elem_ext {
+ struct avf_aqc_add_remove_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
+#define AVF_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
+};
+
+struct avf_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_remove_cloud_filters_completion);
+
+/* Replace Cloud Filters command (indirect 0x025F)
+ * uses avf_aqc_replace_cloud_filters_cmd for the descriptor
+ * and the generic indirect completion structure
+ */
+struct avf_filter_data {
+ u8 filter_type;
+ u8 input[3];
+};
+
+struct avf_aqc_replace_cloud_filters_cmd {
+ u8 valid_flags;
+#define AVF_AQC_REPLACE_L1_FILTER 0x0
+#define AVF_AQC_REPLACE_CLOUD_FILTER 0x1
+#define AVF_AQC_GET_CLOUD_FILTERS 0x2
+#define AVF_AQC_MIRROR_CLOUD_FILTER 0x4
+#define AVF_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
+ u8 old_filter_type;
+ u8 new_filter_type;
+ u8 tr_bit;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct avf_aqc_replace_cloud_filters_cmd_buf {
+ u8 data[32];
+/* Filter type INPUT codes*/
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL)
+
+/* Field Vector offsets */
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
+/* big FLU */
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
+/* big FLU */
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
+
+#define AVF_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
+ struct avf_filter_data filters[8];
+};
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * Note: some rule types (4, 5) do not use an external buffer;
+ * take care to set the flags correctly.
+ */
+struct avf_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define AVF_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define AVF_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ AVF_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define AVF_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define AVF_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define AVF_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define AVF_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define AVF_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_delete_mirror_rule);
+
+struct avf_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_delete_mirror_rule_completion);
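+
+/* Sketch (nb_vlans and dst_vsi_seid are hypothetical): a VLAN mirror rule
+ * passes an array of num_entries 2-byte VLAN ids through the buffer and
+ * names the destination VSI directly:
+ *
+ *	cmd.rule_type = CPU_TO_LE16(AVF_AQC_MIRROR_RULE_TYPE_VLAN <<
+ *				    AVF_AQC_MIRROR_RULE_TYPE_SHIFT);
+ *	cmd.num_entries = CPU_TO_LE16(nb_vlans);
+ *	cmd.destination = CPU_TO_LE16(dst_vsi_seid);
+ *
+ * Rule types 4 and 5 mirror all traffic and send no buffer at all.
+ */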
+
+/* Dynamic Device Personalization */
+struct avf_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_write_personalization_profile);
+
+struct avf_aqc_write_ddp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct avf_aqc_get_applied_profiles {
+ u8 flags;
+#define AVF_AQC_GET_DDP_GET_CONF 0x1
+#define AVF_AQC_GET_DDP_GET_RDPU_CONF 0x2
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_applied_profiles);
+
+/* DCB 0x03xx */
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct avf_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define AVF_AQC_PFC_IGNORE_SET 0x80
+#define AVF_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the avf_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct avf_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct avf_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct avf_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_configure_vsi_bw_limit);
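+
+/* Worked reading of the inline notes above: max_credit selects the burst
+ * ceiling as a power of two, so max_credit = 3 allows bursts of up to
+ * 2^3 = 8 on top of the sustained rate programmed in 'credit'.
+ */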
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with avf_aqc_qs_handles_resp
+ */
+struct avf_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+AVF_CHECK_STRUCT_LEN(0x40, avf_aqc_configure_vsi_ets_sla_bw_data);
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with avf_aqc_qs_handles_resp
+ */
+struct avf_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_configure_vsi_tc_bw_data);
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct avf_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+AVF_CHECK_STRUCT_LEN(0x40, avf_aqc_query_vsi_bw_config_resp);
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct avf_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_query_vsi_ets_sla_config_resp);
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct avf_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct avf_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define AVF_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
+};
+
+AVF_CHECK_STRUCT_LEN(0x80, avf_aqc_configure_switching_comp_ets_data);
+
+/* Configure Switching Component Bandwidth Limits per TC (indirect 0x0416) */
+struct avf_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+AVF_CHECK_STRUCT_LEN(0x40,
+ avf_aqc_configure_switching_comp_ets_bw_limit_data);
+
+/* Configure Switching Component Bandwidth Allocation per TC
+ * (indirect 0x0417)
+ */
+struct avf_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_configure_switching_comp_bw_config_data);
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct avf_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+AVF_CHECK_STRUCT_LEN(0x40, avf_aqc_query_switching_comp_ets_config_resp);
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct avf_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+AVF_CHECK_STRUCT_LEN(0x44, avf_aqc_query_port_ets_config_resp);
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct avf_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_query_switching_comp_bw_config_resp);
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct avf_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
+AVF_CHECK_STRUCT_LEN(0x22, avf_aqc_configure_partition_bw_data);
+
+/* Get and set the active HMC resource profile and status
+ * (direct 0x0500 and direct 0x0501)
+ */
+struct avf_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aq_get_set_hmc_resource_profile);
+
+enum avf_aq_hmc_profile {
+ /* AVF_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ AVF_HMC_PROFILE_DEFAULT = 1,
+ AVF_HMC_PROFILE_FAVOR_VF = 2,
+ AVF_HMC_PROFILE_EQUAL = 3,
+};
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define AVF_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define AVF_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum avf_aq_phy_type {
+ AVF_PHY_TYPE_SGMII = 0x0,
+ AVF_PHY_TYPE_1000BASE_KX = 0x1,
+ AVF_PHY_TYPE_10GBASE_KX4 = 0x2,
+ AVF_PHY_TYPE_10GBASE_KR = 0x3,
+ AVF_PHY_TYPE_40GBASE_KR4 = 0x4,
+ AVF_PHY_TYPE_XAUI = 0x5,
+ AVF_PHY_TYPE_XFI = 0x6,
+ AVF_PHY_TYPE_SFI = 0x7,
+ AVF_PHY_TYPE_XLAUI = 0x8,
+ AVF_PHY_TYPE_XLPPI = 0x9,
+ AVF_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ AVF_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ AVF_PHY_TYPE_10GBASE_AOC = 0xC,
+ AVF_PHY_TYPE_40GBASE_AOC = 0xD,
+ AVF_PHY_TYPE_UNRECOGNIZED = 0xE,
+ AVF_PHY_TYPE_UNSUPPORTED = 0xF,
+ AVF_PHY_TYPE_100BASE_TX = 0x11,
+ AVF_PHY_TYPE_1000BASE_T = 0x12,
+ AVF_PHY_TYPE_10GBASE_T = 0x13,
+ AVF_PHY_TYPE_10GBASE_SR = 0x14,
+ AVF_PHY_TYPE_10GBASE_LR = 0x15,
+ AVF_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ AVF_PHY_TYPE_10GBASE_CR1 = 0x17,
+ AVF_PHY_TYPE_40GBASE_CR4 = 0x18,
+ AVF_PHY_TYPE_40GBASE_SR4 = 0x19,
+ AVF_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ AVF_PHY_TYPE_1000BASE_SX = 0x1B,
+ AVF_PHY_TYPE_1000BASE_LX = 0x1C,
+ AVF_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ AVF_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ AVF_PHY_TYPE_25GBASE_KR = 0x1F,
+ AVF_PHY_TYPE_25GBASE_CR = 0x20,
+ AVF_PHY_TYPE_25GBASE_SR = 0x21,
+ AVF_PHY_TYPE_25GBASE_LR = 0x22,
+ AVF_PHY_TYPE_25GBASE_AOC = 0x23,
+ AVF_PHY_TYPE_25GBASE_ACC = 0x24,
+ AVF_PHY_TYPE_MAX,
+ AVF_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
+ AVF_PHY_TYPE_EMPTY = 0xFE,
+ AVF_PHY_TYPE_DEFAULT = 0xFF,
+};
+
+#define AVF_LINK_SPEED_100MB_SHIFT 0x1
+#define AVF_LINK_SPEED_1000MB_SHIFT 0x2
+#define AVF_LINK_SPEED_10GB_SHIFT 0x3
+#define AVF_LINK_SPEED_40GB_SHIFT 0x4
+#define AVF_LINK_SPEED_20GB_SHIFT 0x5
+#define AVF_LINK_SPEED_25GB_SHIFT 0x6
+
+enum avf_aq_link_speed {
+ AVF_LINK_SPEED_UNKNOWN = 0,
+ AVF_LINK_SPEED_100MB = (1 << AVF_LINK_SPEED_100MB_SHIFT),
+ AVF_LINK_SPEED_1GB = (1 << AVF_LINK_SPEED_1000MB_SHIFT),
+ AVF_LINK_SPEED_10GB = (1 << AVF_LINK_SPEED_10GB_SHIFT),
+ AVF_LINK_SPEED_40GB = (1 << AVF_LINK_SPEED_40GB_SHIFT),
+ AVF_LINK_SPEED_20GB = (1 << AVF_LINK_SPEED_20GB_SHIFT),
+ AVF_LINK_SPEED_25GB = (1 << AVF_LINK_SPEED_25GB_SHIFT),
+};
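+
+/* Note: avf_aq_link_speed values are bit flags rather than consecutive
+ * codes, so capability checks mask instead of comparing (resp being a
+ * hypothetical get-link-status response):
+ *
+ *	if (resp.link_speed & AVF_LINK_SPEED_25GB)
+ *		... the link runs at 25G ...
+ */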
+
+struct avf_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_module_desc);
+
+struct avf_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define AVF_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define AVF_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define AVF_AQ_PHY_FLAG_LOW_POWER 0x04
+#define AVF_AQ_PHY_LINK_ENABLED 0x08
+#define AVF_AQ_PHY_AN_ENABLED 0x10
+#define AVF_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define AVF_AQ_PHY_FEC_ABILITY_KR 0x40
+#define AVF_AQ_PHY_FEC_ABILITY_RS 0x80
+ __le16 eee_capability;
+#define AVF_AQ_EEE_100BASE_TX 0x0002
+#define AVF_AQ_EEE_1000BASE_T 0x0004
+#define AVF_AQ_EEE_10GBASE_T 0x0008
+#define AVF_AQ_EEE_1000BASE_KX 0x0010
+#define AVF_AQ_EEE_10GBASE_KX4 0x0020
+#define AVF_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define AVF_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 phy_type_ext;
+#define AVF_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define AVF_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define AVF_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define AVF_AQ_PHY_TYPE_EXT_25G_LR 0x08
+#define AVF_AQ_PHY_TYPE_EXT_25G_AOC 0x10
+#define AVF_AQ_PHY_TYPE_EXT_25G_ACC 0x20
+ u8 fec_cfg_curr_mod_ext_info;
+#define AVF_AQ_ENABLE_FEC_KR 0x01
+#define AVF_AQ_ENABLE_FEC_RS 0x02
+#define AVF_AQ_REQUEST_FEC_KR 0x04
+#define AVF_AQ_REQUEST_FEC_RS 0x08
+#define AVF_AQ_ENABLE_FEC_AUTO 0x10
+#define AVF_AQ_FEC
+#define AVF_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define AVF_AQ_MODULE_TYPE_EXT_SHIFT 5
+
+ u8 ext_comp_code;
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define AVF_AQ_PHY_MAX_QMS 16
+ struct avf_aqc_module_desc qualified_module[AVF_AQ_PHY_MAX_QMS];
+};
+
+AVF_CHECK_STRUCT_LEN(0x218, avf_aq_get_phy_abilities_resp);
+
+/* Set PHY Config (direct 0x0601) */
+struct avf_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define AVF_AQ_PHY_ENABLE_LINK 0x08
+#define AVF_AQ_PHY_ENABLE_AN 0x10
+#define AVF_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 phy_type_ext;
+ u8 fec_config;
+#define AVF_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define AVF_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define AVF_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define AVF_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define AVF_AQ_SET_FEC_AUTO BIT(4)
+#define AVF_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define AVF_AQ_PHY_FEC_CONFIG_MASK (0x1F << AVF_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct avf_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define AVF_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define AVF_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define AVF_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define AVF_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define AVF_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define AVF_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define AVF_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define AVF_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define AVF_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define AVF_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define AVF_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define AVF_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define AVF_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define AVF_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x0605) */
+struct avf_aqc_set_link_restart_an {
+ u8 command;
+#define AVF_AQ_PHY_RESTART_AN 0x02
+#define AVF_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct avf_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define AVF_AQ_LSE_MASK 0x3
+#define AVF_AQ_LSE_NOP 0x0
+#define AVF_AQ_LSE_DISABLE 0x2
+#define AVF_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define AVF_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* avf_aq_phy_type */
+ u8 link_speed; /* avf_aq_link_speed */
+ u8 link_info;
+#define AVF_AQ_LINK_UP 0x01 /* obsolete */
+#define AVF_AQ_LINK_UP_FUNCTION 0x01
+#define AVF_AQ_LINK_FAULT 0x02
+#define AVF_AQ_LINK_FAULT_TX 0x04
+#define AVF_AQ_LINK_FAULT_RX 0x08
+#define AVF_AQ_LINK_FAULT_REMOTE 0x10
+#define AVF_AQ_LINK_UP_PORT 0x20
+#define AVF_AQ_MEDIA_AVAILABLE 0x40
+#define AVF_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define AVF_AQ_AN_COMPLETED 0x01
+#define AVF_AQ_LP_AN_ABILITY 0x02
+#define AVF_AQ_PD_FAULT 0x04
+#define AVF_AQ_FEC_EN 0x08
+#define AVF_AQ_PHY_LOW_POWER 0x10
+#define AVF_AQ_LINK_PAUSE_TX 0x20
+#define AVF_AQ_LINK_PAUSE_RX 0x40
+#define AVF_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define AVF_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define AVF_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define AVF_AQ_LINK_TX_SHIFT 0x02
+#define AVF_AQ_LINK_TX_MASK (0x03 << AVF_AQ_LINK_TX_SHIFT)
+#define AVF_AQ_LINK_TX_ACTIVE 0x00
+#define AVF_AQ_LINK_TX_DRAINED 0x01
+#define AVF_AQ_LINK_TX_FLUSHED 0x03
+#define AVF_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define AVF_AQ_25G_NO_ERR 0x00
+#define AVF_AQ_25G_NOT_PRESENT 0x01
+#define AVF_AQ_25G_NVM_CRC_ERR 0x02
+#define AVF_AQ_25G_SBUS_UCODE_ERR 0x03
+#define AVF_AQ_25G_SERDES_UCODE_ERR 0x04
+#define AVF_AQ_25G_NIMB_UCODE_ERR 0x05
+ u8 loopback; /* use defines from avf_aqc_set_lb_mode */
+/* Since firmware API 1.7, the loopback field carries power class info as well */
+#define AVF_AQ_LOOPBACK_MASK 0x07
+#define AVF_AQ_PWR_CLASS_SHIFT_LB 6
+#define AVF_AQ_PWR_CLASS_MASK_LB (0x03 << AVF_AQ_PWR_CLASS_SHIFT_LB)
+ __le16 max_frame_size;
+ u8 config;
+#define AVF_AQ_CONFIG_FEC_KR_ENA 0x01
+#define AVF_AQ_CONFIG_FEC_RS_ENA 0x02
+#define AVF_AQ_CONFIG_CRC_ENA 0x04
+#define AVF_AQ_CONFIG_PACING_MASK 0x78
+ union {
+ struct {
+ u8 power_desc;
+#define AVF_AQ_LINK_POWER_CLASS_1 0x00
+#define AVF_AQ_LINK_POWER_CLASS_2 0x01
+#define AVF_AQ_LINK_POWER_CLASS_3 0x02
+#define AVF_AQ_LINK_POWER_CLASS_4 0x03
+#define AVF_AQ_PWR_CLASS_MASK 0x03
+ u8 reserved[4];
+ };
+ struct {
+ u8 link_type[4];
+ u8 link_type_ext;
+ };
+ };
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_link_status);
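+
+/* Sketch: with firmware API 1.7 and later the loopback byte carries two
+ * fields, recovered separately (resp is a hypothetical response):
+ *
+ *	lb = resp.loopback & AVF_AQ_LOOPBACK_MASK;
+ *	pwr = (resp.loopback & AVF_AQ_PWR_CLASS_MASK_LB) >>
+ *	      AVF_AQ_PWR_CLASS_SHIFT_LB;
+ *
+ * where pwr maps onto the AVF_AQ_LINK_POWER_CLASS_* codes.
+ */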
+
+/* Set event mask command (direct 0x0613) */
+struct avf_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define AVF_AQ_EVENT_LINK_UPDOWN 0x0002
+#define AVF_AQ_EVENT_MEDIA_NA 0x0004
+#define AVF_AQ_EVENT_LINK_FAULT 0x0008
+#define AVF_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define AVF_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define AVF_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define AVF_AQ_EVENT_AN_COMPLETED 0x0080
+#define AVF_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define AVF_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct avf_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct avf_aqc_set_lb_mode {
+ u8 lb_level;
+#define AVF_AQ_LB_NONE 0
+#define AVF_AQ_LB_MAC 1
+#define AVF_AQ_LB_SERDES 2
+#define AVF_AQ_LB_PHY_INT 3
+#define AVF_AQ_LB_PHY_EXT 4
+#define AVF_AQ_LB_CPVL_PCS 5
+#define AVF_AQ_LB_CPVL_EXT 6
+#define AVF_AQ_LB_PHY_LOCAL 0x01
+#define AVF_AQ_LB_PHY_REMOTE 0x02
+#define AVF_AQ_LB_MAC_LOCAL 0x04
+ u8 lb_type;
+#define AVF_AQ_LB_LOCAL 0
+#define AVF_AQ_LB_FAR 0x01
+ u8 speed;
+#define AVF_AQ_LB_SPEED_NONE 0
+#define AVF_AQ_LB_SPEED_1G 1
+#define AVF_AQ_LB_SPEED_10G 2
+#define AVF_AQ_LB_SPEED_40G 3
+#define AVF_AQ_LB_SPEED_20G 4
+ u8 force_speed;
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_lb_mode);
+
+/* Set PHY Debug command (0x0622) */
+struct avf_aqc_set_phy_debug {
+ u8 command_flags;
+#define AVF_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define AVF_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+/* Disable link manageability on a single port */
+#define AVF_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disabling link manageability on all ports requires both bits 4 and 5 */
+#define AVF_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_phy_debug);
+
+enum avf_aq_phy_reg_type {
+ AVF_AQC_PHY_REG_INTERNAL = 0x1,
+	AVF_AQC_PHY_REG_EXTERNAL_BASET = 0x2,
+	AVF_AQC_PHY_REG_EXTERNAL_MODULE = 0x3
+};
+
+/* Run PHY Activity (0x0626) */
+struct avf_aqc_run_phy_activity {
+ __le16 activity_id;
+ u8 flags;
+ u8 reserved1;
+ __le32 control;
+ __le32 data;
+ u8 reserved2[4];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_run_phy_activity);
+
+/* Set PHY Register command (0x0628) */
+/* Get PHY Register command (0x0629) */
+struct avf_aqc_phy_register_access {
+ u8 phy_interface;
+#define AVF_AQ_PHY_REG_ACCESS_INTERNAL 0
+#define AVF_AQ_PHY_REG_ACCESS_EXTERNAL 1
+#define AVF_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
+	u8	dev_address;
+ u8 reserved1[2];
+ __le32 reg_address;
+ __le32 reg_value;
+ u8 reserved2[4];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_phy_register_access);
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct avf_aqc_nvm_update {
+ u8 command_flags;
+#define AVF_AQ_NVM_LAST_CMD 0x01
+#define AVF_AQ_NVM_FLASH_ONLY 0x80
+#define AVF_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define AVF_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define AVF_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define AVF_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_nvm_update);
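+
+/* Illustrative sketch: indirect commands such as NVM Read/Update carry the
+ * DMA address of their data buffer split across addr_high/addr_low; a
+ * caller would typically fill them from a 64-bit bus address like this
+ * (assuming the CPU_TO_LE32 helper from the osdep layer):
+ *
+ *	cmd->addr_high = CPU_TO_LE32((u32)(dma_addr >> 32));
+ *	cmd->addr_low = CPU_TO_LE32((u32)(dma_addr & 0xFFFFFFFF));
+ */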
+
+/* NVM Config Read (indirect 0x0704) */
+struct avf_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define AVF_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define AVF_AQ_ANVM_READ_SINGLE_FEATURE 0
+#define AVF_AQ_ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ __le16 element_id_msw; /* MSWord of field ID */
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct avf_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_nvm_config_write);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define AVF_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
+#define AVF_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+ (1 << AVF_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define AVF_AQ_ANVM_FEATURE 0
+#define AVF_AQ_ANVM_IMMEDIATE_FIELD \
+				(1 << AVF_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+struct avf_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+#define AVF_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
+#define AVF_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
+#define AVF_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+AVF_CHECK_STRUCT_LEN(0x6, avf_aqc_nvm_config_data_feature);
+
+struct avf_aqc_nvm_config_data_immediate_field {
+ __le32 field_id;
+ __le32 field_value;
+ __le16 field_options;
+ __le16 reserved;
+};
+
+AVF_CHECK_STRUCT_LEN(0xc, avf_aqc_nvm_config_data_immediate_field);
+
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+struct avf_aqc_nvm_oem_post_update {
+#define AVF_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
+ u8 sel_data;
+ u8 reserved[7];
+};
+
+AVF_CHECK_STRUCT_LEN(0x8, avf_aqc_nvm_oem_post_update);
+
+struct avf_aqc_nvm_oem_post_update_buffer {
+ u8 str_len;
+ u8 dev_addr;
+ __le16 eeprom_addr;
+ u8 data[36];
+};
+
+AVF_CHECK_STRUCT_LEN(0x28, avf_aqc_nvm_oem_post_update_buffer);
+
+/* Thermal Sensor (indirect 0x0721)
+ * read or set thermal sensor configs and values
+ * takes a sensor and command-specific data buffer, not detailed here
+ */
+struct avf_aqc_thermal_sensor {
+ u8 sensor_action;
+#define AVF_AQ_THERMAL_SENSOR_READ_CONFIG 0
+#define AVF_AQ_THERMAL_SENSOR_SET_CONFIG 1
+#define AVF_AQ_THERMAL_SENSOR_READ_TEMP 2
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_thermal_sensor);
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct avf_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct avf_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct avf_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses avf_aq_desc
+ */
+struct avf_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define AVF_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define AVF_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define AVF_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define AVF_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct avf_aqc_alternate_set_mode {
+ __le32 mode;
+#define AVF_AQ_ALTERNATE_MODE_NONE 0
+#define AVF_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses avf_aq_desc */
+
+/* async events 0x10xx */
+
+/* LAN Queue Overflow Event (direct, 0x1001) */
+struct avf_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct avf_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define AVF_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define AVF_AQ_LLDP_MIB_LOCAL 0x0
+#define AVF_AQ_LLDP_MIB_REMOTE 0x1
+#define AVF_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define AVF_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define AVF_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define AVF_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define AVF_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define AVF_AQ_LLDP_TX_SHIFT 0x4
+#define AVF_AQ_LLDP_TX_MASK (0x03 << AVF_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use AVF_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct avf_aqc_lldp_update_mib {
+ u8 command;
+#define AVF_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define AVF_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct avf_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct avf_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct avf_aqc_lldp_stop {
+ u8 command;
+#define AVF_AQ_LLDP_AGENT_STOP 0x0
+#define AVF_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct avf_aqc_lldp_start {
+ u8 command;
+#define AVF_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_start);
+
+/* Set DCB (direct 0x0303) */
+struct avf_aqc_set_dcb_parameters {
+ u8 command;
+#define AVF_AQ_DCB_SET_AGENT 0x1
+#define AVF_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_set_dcb_parameters);
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
+ */
+
+#define AVF_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define AVF_AQC_CEE_APP_FCOE_MASK (0x7 << AVF_AQC_CEE_APP_FCOE_SHIFT)
+#define AVF_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define AVF_AQC_CEE_APP_ISCSI_MASK (0x7 << AVF_AQC_CEE_APP_ISCSI_SHIFT)
+#define AVF_AQC_CEE_APP_FIP_SHIFT 0x8
+#define AVF_AQC_CEE_APP_FIP_MASK (0x7 << AVF_AQC_CEE_APP_FIP_SHIFT)
+
+#define AVF_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define AVF_AQC_CEE_PG_STATUS_MASK (0x7 << AVF_AQC_CEE_PG_STATUS_SHIFT)
+#define AVF_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define AVF_AQC_CEE_PFC_STATUS_MASK (0x7 << AVF_AQC_CEE_PFC_STATUS_SHIFT)
+#define AVF_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define AVF_AQC_CEE_APP_STATUS_MASK (0x7 << AVF_AQC_CEE_APP_STATUS_SHIFT)
+#define AVF_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define AVF_AQC_CEE_FCOE_STATUS_MASK (0x7 << AVF_AQC_CEE_FCOE_STATUS_SHIFT)
+#define AVF_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define AVF_AQC_CEE_ISCSI_STATUS_MASK (0x7 << AVF_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define AVF_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define AVF_AQC_CEE_FIP_STATUS_MASK (0x7 << AVF_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct avf_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
+struct avf_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3[2];
+ __le16 oper_app_prio;
+ u8 reserved4[2];
+ __le16 tlv_status;
+};
+
+AVF_CHECK_STRUCT_LEN(0x18, avf_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct avf_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+AVF_CHECK_STRUCT_LEN(0x20, avf_aqc_get_cee_dcb_cfg_resp);
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent, e.g. DCBX
+ */
+struct avf_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
+ u8 type;
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_set_local_mib);
+
+struct avf_aqc_lldp_set_local_mib_resp {
+#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK 0x01
+ u8 status;
+ u8 reserved[15];
+};
+
+AVF_CHECK_STRUCT_LEN(0x10, avf_aqc_lldp_set_local_mib_resp);
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting a specific LLDP agent, e.g. DCBX
+ */
+struct avf_aqc_lldp_stop_start_specific_agent {
+#define AVF_AQC_START_SPECIFIC_AGENT_SHIFT 0
+#define AVF_AQC_START_SPECIFIC_AGENT_MASK \
+ (1 << AVF_AQC_START_SPECIFIC_AGENT_SHIFT)
+ u8 command;
+ u8 reserved[15];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_lldp_stop_start_specific_agent);
+
+/* Add UDP Tunnel command and completion (direct 0x0B00) */
+struct avf_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
+#define AVF_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define AVF_AQC_TUNNEL_TYPE_NGE 0x01
+#define AVF_AQC_TUNNEL_TYPE_TEREDO 0x10
+#define AVF_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
+ u8 reserved1[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_udp_tunnel);
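+
+/* Illustrative sketch: a VXLAN tunnel-add request carries the UDP port in
+ * little-endian byte order plus one of the tunnel type defines above
+ * (assuming the CPU_TO_LE16 helper from the osdep layer; 4789 is the IANA
+ * VXLAN port):
+ *
+ *	cmd->udp_port = CPU_TO_LE16(4789);
+ *	cmd->protocol_type = AVF_AQC_TUNNEL_TYPE_VXLAN;
+ */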
+
+struct avf_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define AVF_AQC_SINGLE_PF 0x0
+#define AVF_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_add_udp_tunnel_completion);
+
+/* Remove UDP Tunnel command (0x0B01) */
+struct avf_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_remove_udp_tunnel);
+
+struct avf_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_del_udp_tunnel_completion);
+
+struct avf_aqc_get_set_rss_key {
+#define AVF_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define AVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define AVF_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ AVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_set_rss_key);
+
+struct avf_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+AVF_CHECK_STRUCT_LEN(0x34, avf_aqc_get_set_rss_key_data);
+
+struct avf_aqc_get_set_rss_lut {
+#define AVF_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define AVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define AVF_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ AVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define AVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define AVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ AVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define AVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define AVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_get_set_rss_lut);
+
+/* tunnel key structure 0x0B10 */
+
+struct avf_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define AVF_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define AVF_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define AVF_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define AVF_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define AVF_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define AVF_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define AVF_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define AVF_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct avf_aqc_oem_param_change {
+ __le32 param_type;
+#define AVF_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define AVF_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define AVF_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ __le16 param_value2;
+ u8 reserved[6];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_oem_param_change);
+
+struct avf_aqc_oem_state_change {
+ __le32 state;
+#define AVF_AQ_OEM_STATE_LINK_DOWN 0x0
+#define AVF_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_oem_state_change);
+
+/* Initialize OCSD (0xFE02, direct) */
+struct avf_aqc_opc_oem_ocsd_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocsd_memory_block_addr_high;
+ __le32 ocsd_memory_block_addr_low;
+ __le32 requested_update_interval;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB (0xFE03, direct) */
+struct avf_aqc_opc_oem_ocbb_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocbb_memory_block_addr_high;
+ __le32 ocbb_memory_block_addr_low;
+ u8 reserved2[4];
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_opc_oem_ocbb_initialize);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct avf_acq_set_test_mode {
+ u8 mode;
+#define AVF_AQ_TEST_PARTIAL 0
+#define AVF_AQ_TEST_FULL 1
+#define AVF_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define AVF_AQ_TEST_OPEN 0
+#define AVF_AQ_TEST_CLOSE 1
+#define AVF_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct avf_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* avf_aq_desc is used for the command */
+struct avf_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct avf_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define AVF_AQ_CLUSTER_ID_AUX 0
+#define AVF_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define AVF_AQ_CLUSTER_ID_TXSCHED 2
+#define AVF_AQ_CLUSTER_ID_HMC 3
+#define AVF_AQ_CLUSTER_ID_MAC0 4
+#define AVF_AQ_CLUSTER_ID_MAC1 5
+#define AVF_AQ_CLUSTER_ID_MAC2 6
+#define AVF_AQ_CLUSTER_ID_MAC3 7
+#define AVF_AQ_CLUSTER_ID_DCB 8
+#define AVF_AQ_CLUSTER_ID_EMP_MEM 9
+#define AVF_AQ_CLUSTER_ID_PKT_BUF 10
+#define AVF_AQ_CLUSTER_ID_ALTRAM 11
+
+struct avf_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_debug_dump_internals);
+
+struct avf_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+AVF_CHECK_CMD_LENGTH(avf_aqc_debug_modify_internals);
+
+#endif /* _AVF_ADMINQ_CMD_H_ */
diff --git a/drivers/net/avf/base/avf_alloc.h b/drivers/net/avf/base/avf_alloc.h
new file mode 100644
index 00000000..21e29bd0
--- /dev/null
+++ b/drivers/net/avf/base/avf_alloc.h
@@ -0,0 +1,65 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_ALLOC_H_
+#define _AVF_ALLOC_H_
+
+struct avf_hw;
+
+/* Memory allocation types */
+enum avf_memory_type {
+ avf_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ avf_mem_asq_buf = 1,
+ avf_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ avf_mem_arq_ring = 3, /* ARQ descriptor ring */
+ avf_mem_atq_ring = 4, /* ATQ descriptor ring */
+ avf_mem_pd = 5, /* Page Descriptor */
+ avf_mem_bp = 6, /* Backing Page - 4KB */
+ avf_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ avf_mem_reserved
+};
+
+/* prototypes for functions used for dynamic memory allocation */
+enum avf_status_code avf_allocate_dma_mem(struct avf_hw *hw,
+ struct avf_dma_mem *mem,
+ enum avf_memory_type type,
+ u64 size, u32 alignment);
+enum avf_status_code avf_free_dma_mem(struct avf_hw *hw,
+ struct avf_dma_mem *mem);
+enum avf_status_code avf_allocate_virt_mem(struct avf_hw *hw,
+ struct avf_virt_mem *mem,
+ u32 size);
+enum avf_status_code avf_free_virt_mem(struct avf_hw *hw,
+ struct avf_virt_mem *mem);
+
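+/* Illustrative sketch: a caller allocating an ARQ descriptor ring would
+ * pass the matching memory type; sizes and alignment below are examples
+ * only, and the va/pa fields are assumed from struct avf_dma_mem in
+ * avf_type.h:
+ *
+ *	struct avf_dma_mem ring;
+ *
+ *	if (avf_allocate_dma_mem(hw, &ring, avf_mem_arq_ring,
+ *				 ring_bytes, 4096) == AVF_SUCCESS) {
+ *		... use ring.va / ring.pa ...
+ *		avf_free_dma_mem(hw, &ring);
+ *	}
+ */
+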
+#endif /* _AVF_ALLOC_H_ */
diff --git a/drivers/net/avf/base/avf_common.c b/drivers/net/avf/base/avf_common.c
new file mode 100644
index 00000000..bbaadada
--- /dev/null
+++ b/drivers/net/avf/base/avf_common.c
@@ -0,0 +1,1845 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "avf_type.h"
+#include "avf_adminq.h"
+#include "avf_prototype.h"
+#include "virtchnl.h"
+
+
+/**
+ * avf_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+enum avf_status_code avf_set_mac_type(struct avf_hw *hw)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+
+	DEBUGFUNC("avf_set_mac_type");
+
+ if (hw->vendor_id == AVF_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+		/* TODO: remove undefined device IDs; need to think about how
+		 * to remove them from the shared code
+		 */
+ case AVF_DEV_ID_ADAPTIVE_VF:
+ hw->mac.type = AVF_MAC_VF;
+ break;
+ default:
+ hw->mac.type = AVF_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = AVF_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ DEBUGOUT2("avf_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * avf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *avf_aq_str(struct avf_hw *hw, enum avf_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case AVF_AQ_RC_OK:
+ return "OK";
+ case AVF_AQ_RC_EPERM:
+ return "AVF_AQ_RC_EPERM";
+ case AVF_AQ_RC_ENOENT:
+ return "AVF_AQ_RC_ENOENT";
+ case AVF_AQ_RC_ESRCH:
+ return "AVF_AQ_RC_ESRCH";
+ case AVF_AQ_RC_EINTR:
+ return "AVF_AQ_RC_EINTR";
+ case AVF_AQ_RC_EIO:
+ return "AVF_AQ_RC_EIO";
+ case AVF_AQ_RC_ENXIO:
+ return "AVF_AQ_RC_ENXIO";
+ case AVF_AQ_RC_E2BIG:
+ return "AVF_AQ_RC_E2BIG";
+ case AVF_AQ_RC_EAGAIN:
+ return "AVF_AQ_RC_EAGAIN";
+ case AVF_AQ_RC_ENOMEM:
+ return "AVF_AQ_RC_ENOMEM";
+ case AVF_AQ_RC_EACCES:
+ return "AVF_AQ_RC_EACCES";
+ case AVF_AQ_RC_EFAULT:
+ return "AVF_AQ_RC_EFAULT";
+ case AVF_AQ_RC_EBUSY:
+ return "AVF_AQ_RC_EBUSY";
+ case AVF_AQ_RC_EEXIST:
+ return "AVF_AQ_RC_EEXIST";
+ case AVF_AQ_RC_EINVAL:
+ return "AVF_AQ_RC_EINVAL";
+ case AVF_AQ_RC_ENOTTY:
+ return "AVF_AQ_RC_ENOTTY";
+ case AVF_AQ_RC_ENOSPC:
+ return "AVF_AQ_RC_ENOSPC";
+ case AVF_AQ_RC_ENOSYS:
+ return "AVF_AQ_RC_ENOSYS";
+ case AVF_AQ_RC_ERANGE:
+ return "AVF_AQ_RC_ERANGE";
+ case AVF_AQ_RC_EFLUSHED:
+ return "AVF_AQ_RC_EFLUSHED";
+ case AVF_AQ_RC_BAD_ADDR:
+ return "AVF_AQ_RC_BAD_ADDR";
+ case AVF_AQ_RC_EMODE:
+ return "AVF_AQ_RC_EMODE";
+ case AVF_AQ_RC_EFBIG:
+ return "AVF_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
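+
+/* Illustrative sketch: callers typically pair this helper with avf_debug()
+ * when a command fails (AVF_DEBUG_AQ_MESSAGE is an assumed debug-mask
+ * name):
+ *
+ *	avf_debug(hw, AVF_DEBUG_AQ_MESSAGE, "AQ error: %s\n",
+ *		  avf_aq_str(hw, hw->aq.asq_last_status));
+ */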
+
+/**
+ * avf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *avf_stat_str(struct avf_hw *hw, enum avf_status_code stat_err)
+{
+ switch (stat_err) {
+ case AVF_SUCCESS:
+ return "OK";
+ case AVF_ERR_NVM:
+ return "AVF_ERR_NVM";
+ case AVF_ERR_NVM_CHECKSUM:
+ return "AVF_ERR_NVM_CHECKSUM";
+ case AVF_ERR_PHY:
+ return "AVF_ERR_PHY";
+ case AVF_ERR_CONFIG:
+ return "AVF_ERR_CONFIG";
+ case AVF_ERR_PARAM:
+ return "AVF_ERR_PARAM";
+ case AVF_ERR_MAC_TYPE:
+ return "AVF_ERR_MAC_TYPE";
+ case AVF_ERR_UNKNOWN_PHY:
+ return "AVF_ERR_UNKNOWN_PHY";
+ case AVF_ERR_LINK_SETUP:
+ return "AVF_ERR_LINK_SETUP";
+ case AVF_ERR_ADAPTER_STOPPED:
+ return "AVF_ERR_ADAPTER_STOPPED";
+ case AVF_ERR_INVALID_MAC_ADDR:
+ return "AVF_ERR_INVALID_MAC_ADDR";
+ case AVF_ERR_DEVICE_NOT_SUPPORTED:
+ return "AVF_ERR_DEVICE_NOT_SUPPORTED";
+ case AVF_ERR_MASTER_REQUESTS_PENDING:
+ return "AVF_ERR_MASTER_REQUESTS_PENDING";
+ case AVF_ERR_INVALID_LINK_SETTINGS:
+ return "AVF_ERR_INVALID_LINK_SETTINGS";
+ case AVF_ERR_AUTONEG_NOT_COMPLETE:
+ return "AVF_ERR_AUTONEG_NOT_COMPLETE";
+ case AVF_ERR_RESET_FAILED:
+ return "AVF_ERR_RESET_FAILED";
+ case AVF_ERR_SWFW_SYNC:
+ return "AVF_ERR_SWFW_SYNC";
+ case AVF_ERR_NO_AVAILABLE_VSI:
+ return "AVF_ERR_NO_AVAILABLE_VSI";
+ case AVF_ERR_NO_MEMORY:
+ return "AVF_ERR_NO_MEMORY";
+ case AVF_ERR_BAD_PTR:
+ return "AVF_ERR_BAD_PTR";
+ case AVF_ERR_RING_FULL:
+ return "AVF_ERR_RING_FULL";
+ case AVF_ERR_INVALID_PD_ID:
+ return "AVF_ERR_INVALID_PD_ID";
+ case AVF_ERR_INVALID_QP_ID:
+ return "AVF_ERR_INVALID_QP_ID";
+ case AVF_ERR_INVALID_CQ_ID:
+ return "AVF_ERR_INVALID_CQ_ID";
+ case AVF_ERR_INVALID_CEQ_ID:
+ return "AVF_ERR_INVALID_CEQ_ID";
+ case AVF_ERR_INVALID_AEQ_ID:
+ return "AVF_ERR_INVALID_AEQ_ID";
+ case AVF_ERR_INVALID_SIZE:
+ return "AVF_ERR_INVALID_SIZE";
+ case AVF_ERR_INVALID_ARP_INDEX:
+ return "AVF_ERR_INVALID_ARP_INDEX";
+ case AVF_ERR_INVALID_FPM_FUNC_ID:
+ return "AVF_ERR_INVALID_FPM_FUNC_ID";
+ case AVF_ERR_QP_INVALID_MSG_SIZE:
+ return "AVF_ERR_QP_INVALID_MSG_SIZE";
+ case AVF_ERR_QP_TOOMANY_WRS_POSTED:
+ return "AVF_ERR_QP_TOOMANY_WRS_POSTED";
+ case AVF_ERR_INVALID_FRAG_COUNT:
+ return "AVF_ERR_INVALID_FRAG_COUNT";
+ case AVF_ERR_QUEUE_EMPTY:
+ return "AVF_ERR_QUEUE_EMPTY";
+ case AVF_ERR_INVALID_ALIGNMENT:
+ return "AVF_ERR_INVALID_ALIGNMENT";
+ case AVF_ERR_FLUSHED_QUEUE:
+ return "AVF_ERR_FLUSHED_QUEUE";
+ case AVF_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "AVF_ERR_INVALID_PUSH_PAGE_INDEX";
+ case AVF_ERR_INVALID_IMM_DATA_SIZE:
+ return "AVF_ERR_INVALID_IMM_DATA_SIZE";
+ case AVF_ERR_TIMEOUT:
+ return "AVF_ERR_TIMEOUT";
+ case AVF_ERR_OPCODE_MISMATCH:
+ return "AVF_ERR_OPCODE_MISMATCH";
+ case AVF_ERR_CQP_COMPL_ERROR:
+ return "AVF_ERR_CQP_COMPL_ERROR";
+ case AVF_ERR_INVALID_VF_ID:
+ return "AVF_ERR_INVALID_VF_ID";
+ case AVF_ERR_INVALID_HMCFN_ID:
+ return "AVF_ERR_INVALID_HMCFN_ID";
+ case AVF_ERR_BACKING_PAGE_ERROR:
+ return "AVF_ERR_BACKING_PAGE_ERROR";
+ case AVF_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "AVF_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case AVF_ERR_INVALID_PBLE_INDEX:
+ return "AVF_ERR_INVALID_PBLE_INDEX";
+ case AVF_ERR_INVALID_SD_INDEX:
+ return "AVF_ERR_INVALID_SD_INDEX";
+ case AVF_ERR_INVALID_PAGE_DESC_INDEX:
+ return "AVF_ERR_INVALID_PAGE_DESC_INDEX";
+ case AVF_ERR_INVALID_SD_TYPE:
+ return "AVF_ERR_INVALID_SD_TYPE";
+ case AVF_ERR_MEMCPY_FAILED:
+ return "AVF_ERR_MEMCPY_FAILED";
+ case AVF_ERR_INVALID_HMC_OBJ_INDEX:
+ return "AVF_ERR_INVALID_HMC_OBJ_INDEX";
+ case AVF_ERR_INVALID_HMC_OBJ_COUNT:
+ return "AVF_ERR_INVALID_HMC_OBJ_COUNT";
+ case AVF_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "AVF_ERR_INVALID_SRQ_ARM_LIMIT";
+ case AVF_ERR_SRQ_ENABLED:
+ return "AVF_ERR_SRQ_ENABLED";
+ case AVF_ERR_ADMIN_QUEUE_ERROR:
+ return "AVF_ERR_ADMIN_QUEUE_ERROR";
+ case AVF_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "AVF_ERR_ADMIN_QUEUE_TIMEOUT";
+ case AVF_ERR_BUF_TOO_SHORT:
+ return "AVF_ERR_BUF_TOO_SHORT";
+ case AVF_ERR_ADMIN_QUEUE_FULL:
+ return "AVF_ERR_ADMIN_QUEUE_FULL";
+ case AVF_ERR_ADMIN_QUEUE_NO_WORK:
+ return "AVF_ERR_ADMIN_QUEUE_NO_WORK";
+ case AVF_ERR_BAD_IWARP_CQE:
+ return "AVF_ERR_BAD_IWARP_CQE";
+ case AVF_ERR_NVM_BLANK_MODE:
+ return "AVF_ERR_NVM_BLANK_MODE";
+ case AVF_ERR_NOT_IMPLEMENTED:
+ return "AVF_ERR_NOT_IMPLEMENTED";
+ case AVF_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "AVF_ERR_PE_DOORBELL_NOT_ENABLED";
+ case AVF_ERR_DIAG_TEST_FAILED:
+ return "AVF_ERR_DIAG_TEST_FAILED";
+ case AVF_ERR_NOT_READY:
+ return "AVF_ERR_NOT_READY";
+ case AVF_NOT_SUPPORTED:
+ return "AVF_NOT_SUPPORTED";
+ case AVF_ERR_FIRMWARE_API_VERSION:
+ return "AVF_ERR_FIRMWARE_API_VERSION";
+ case AVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "AVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/**
+ * avf_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void avf_debug_aq(struct avf_hw *hw, enum avf_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct avf_aq_desc *aq_desc = (struct avf_aq_desc *)desc;
+ u8 *buf = (u8 *)buffer;
+ u16 len;
+ u16 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ len = LE16_TO_CPU(aq_desc->datalen);
+
+ avf_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ LE16_TO_CPU(aq_desc->opcode),
+ LE16_TO_CPU(aq_desc->flags),
+ LE16_TO_CPU(aq_desc->datalen),
+ LE16_TO_CPU(aq_desc->retval));
+ avf_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->cookie_high),
+ LE32_TO_CPU(aq_desc->cookie_low));
+ avf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.internal.param0),
+ LE32_TO_CPU(aq_desc->params.internal.param1));
+ avf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(aq_desc->params.external.addr_high),
+ LE32_TO_CPU(aq_desc->params.external.addr_low));
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ avf_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ avf_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i+1], buf[i+2], buf[i+3],
+ buf[i+4], buf[i+5], buf[i+6], buf[i+7],
+ buf[i+8], buf[i+9], buf[i+10], buf[i+11],
+ buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
+ /* the most we could have left is 16 bytes, pad with zeros */
+ if (i < len) {
+ char d_buf[16];
+ int j, i_sav;
+
+ i_sav = i;
+ memset(d_buf, 0, sizeof(d_buf));
+ for (j = 0; i < len; j++, i++)
+ d_buf[j] = buf[i];
+ avf_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
+ d_buf[4], d_buf[5], d_buf[6], d_buf[7],
+ d_buf[8], d_buf[9], d_buf[10], d_buf[11],
+ d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
+ }
+ }
+}
+
+/**
+ * avf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the queue is enabled, else false.
+ **/
+bool avf_check_asq_alive(struct avf_hw *hw)
+{
+ if (hw->aq.asq.len)
+#ifdef INTEGRATED_VF
+ if (avf_is_vf(hw))
+ return !!(rd32(hw, hw->aq.asq.len) &
+ AVF_ATQLEN1_ATQENABLE_MASK);
+#else
+ return !!(rd32(hw, hw->aq.asq.len) &
+ AVF_ATQLEN1_ATQENABLE_MASK);
+#endif /* INTEGRATED_VF */
+ return false;
+}
+
+/**
+ * avf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+enum avf_status_code avf_aq_queue_shutdown(struct avf_hw *hw,
+ bool unloading)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_queue_shutdown *cmd =
+ (struct avf_aqc_queue_shutdown *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = CPU_TO_LE32(AVF_AQ_DRIVER_UNLOADING);
+ status = avf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * avf_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set the RSS lookup table
+ **/
+STATIC enum avf_status_code avf_aq_get_set_rss_lut(struct avf_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum avf_status_code status;
+ struct avf_aq_desc desc;
+ struct avf_aqc_get_set_rss_lut *cmd_resp =
+ (struct avf_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_set_rss_lut);
+ else
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ AVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ AVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)AVF_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((AVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ AVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ AVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((AVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ AVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ AVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = avf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * avf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum avf_status_code avf_aq_get_rss_lut(struct avf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return avf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * avf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum avf_status_code avf_aq_set_rss_lut(struct avf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return avf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
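+
+/* Illustrative sketch: a caller usually spreads its queues across the LUT
+ * round-robin before programming it; lut, lut_size, num_queues and vsi_id
+ * are the caller's own variables:
+ *
+ *	for (i = 0; i < lut_size; i++)
+ *		lut[i] = i % num_queues;
+ *	status = avf_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
+ */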
+
+/**
+ * avf_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get or set the RSS key per VSI
+ **/
+STATIC enum avf_status_code avf_aq_get_set_rss_key(struct avf_hw *hw,
+ u16 vsi_id,
+ struct avf_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum avf_status_code status;
+ struct avf_aq_desc desc;
+ struct avf_aqc_get_set_rss_key *cmd_resp =
+ (struct avf_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct avf_aqc_get_set_rss_key_data);
+
+ if (set)
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_set_rss_key);
+ else
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ AVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ AVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)AVF_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = avf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * avf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * get the RSS key per VSI
+ **/
+enum avf_status_code avf_aq_get_rss_key(struct avf_hw *hw,
+ u16 vsi_id,
+ struct avf_aqc_get_set_rss_key_data *key)
+{
+ return avf_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * avf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum avf_status_code avf_aq_set_rss_key(struct avf_hw *hw,
+ u16 vsi_id,
+ struct avf_aqc_get_set_rss_key_data *key)
+{
+ return avf_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
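+
+/* Illustrative sketch: the key buffer is the fixed-size struct from
+ * avf_adminq_cmd.h, so a read-modify-write of the key looks like this
+ * (error handling omitted):
+ *
+ *	struct avf_aqc_get_set_rss_key_data key;
+ *
+ *	avf_aq_get_rss_key(hw, vsi_id, &key);
+ *	... modify key.standard_rss_key / key.extended_hash_key ...
+ *	avf_aq_set_rss_key(hw, vsi_id, &key);
+ */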
+
+/* The avf_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical workflow:
+ *
+ * IF NOT avf_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF avf_ptype_lookup[ptype].outer_ip == AVF_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum avf_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
+
+/* macro to make the table lines short */
+#define AVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ AVF_RX_PTYPE_OUTER_##OUTER_IP, \
+ AVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ AVF_RX_PTYPE_##OUTER_FRAG, \
+ AVF_RX_PTYPE_TUNNEL_##T, \
+ AVF_RX_PTYPE_TUNNEL_END_##TE, \
+ AVF_RX_PTYPE_##TEF, \
+ AVF_RX_PTYPE_INNER_PROT_##I, \
+ AVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define AVF_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros make the table fit but are terse */
+#define AVF_RX_PTYPE_NOF AVF_RX_PTYPE_NOT_FRAG
+#define AVF_RX_PTYPE_FRG AVF_RX_PTYPE_FRAG
+#define AVF_RX_PTYPE_INNER_PROT_TS AVF_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct avf_rx_ptype_decoded avf_ptype_lookup[] = {
+ /* L2 Packet types */
+ AVF_PTT_UNUSED_ENTRY(0),
+ AVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ AVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ AVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ AVF_PTT_UNUSED_ENTRY(4),
+ AVF_PTT_UNUSED_ENTRY(5),
+ AVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ AVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ AVF_PTT_UNUSED_ENTRY(8),
+ AVF_PTT_UNUSED_ENTRY(9),
+ AVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ AVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ AVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ AVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(25),
+ AVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ AVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ AVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ AVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(32),
+ AVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ AVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(39),
+ AVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ AVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ AVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(47),
+ AVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ AVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(54),
+ AVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ AVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ AVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(62),
+ AVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ AVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(69),
+ AVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ AVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ AVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(77),
+ AVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ AVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(84),
+ AVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ AVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ AVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(91),
+ AVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ AVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ AVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ AVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(98),
+ AVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ AVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(105),
+ AVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ AVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ AVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(113),
+ AVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ AVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(120),
+ AVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ AVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ AVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(128),
+ AVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ AVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(135),
+ AVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ AVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ AVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ AVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ AVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(143),
+ AVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ AVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ AVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ AVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ AVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ AVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ AVF_PTT_UNUSED_ENTRY(150),
+ AVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ AVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ AVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ AVF_PTT_UNUSED_ENTRY(154),
+ AVF_PTT_UNUSED_ENTRY(155),
+ AVF_PTT_UNUSED_ENTRY(156),
+ AVF_PTT_UNUSED_ENTRY(157),
+ AVF_PTT_UNUSED_ENTRY(158),
+ AVF_PTT_UNUSED_ENTRY(159),
+
+ AVF_PTT_UNUSED_ENTRY(160),
+ AVF_PTT_UNUSED_ENTRY(161),
+ AVF_PTT_UNUSED_ENTRY(162),
+ AVF_PTT_UNUSED_ENTRY(163),
+ AVF_PTT_UNUSED_ENTRY(164),
+ AVF_PTT_UNUSED_ENTRY(165),
+ AVF_PTT_UNUSED_ENTRY(166),
+ AVF_PTT_UNUSED_ENTRY(167),
+ AVF_PTT_UNUSED_ENTRY(168),
+ AVF_PTT_UNUSED_ENTRY(169),
+
+ AVF_PTT_UNUSED_ENTRY(170),
+ AVF_PTT_UNUSED_ENTRY(171),
+ AVF_PTT_UNUSED_ENTRY(172),
+ AVF_PTT_UNUSED_ENTRY(173),
+ AVF_PTT_UNUSED_ENTRY(174),
+ AVF_PTT_UNUSED_ENTRY(175),
+ AVF_PTT_UNUSED_ENTRY(176),
+ AVF_PTT_UNUSED_ENTRY(177),
+ AVF_PTT_UNUSED_ENTRY(178),
+ AVF_PTT_UNUSED_ENTRY(179),
+
+ AVF_PTT_UNUSED_ENTRY(180),
+ AVF_PTT_UNUSED_ENTRY(181),
+ AVF_PTT_UNUSED_ENTRY(182),
+ AVF_PTT_UNUSED_ENTRY(183),
+ AVF_PTT_UNUSED_ENTRY(184),
+ AVF_PTT_UNUSED_ENTRY(185),
+ AVF_PTT_UNUSED_ENTRY(186),
+ AVF_PTT_UNUSED_ENTRY(187),
+ AVF_PTT_UNUSED_ENTRY(188),
+ AVF_PTT_UNUSED_ENTRY(189),
+
+ AVF_PTT_UNUSED_ENTRY(190),
+ AVF_PTT_UNUSED_ENTRY(191),
+ AVF_PTT_UNUSED_ENTRY(192),
+ AVF_PTT_UNUSED_ENTRY(193),
+ AVF_PTT_UNUSED_ENTRY(194),
+ AVF_PTT_UNUSED_ENTRY(195),
+ AVF_PTT_UNUSED_ENTRY(196),
+ AVF_PTT_UNUSED_ENTRY(197),
+ AVF_PTT_UNUSED_ENTRY(198),
+ AVF_PTT_UNUSED_ENTRY(199),
+
+ AVF_PTT_UNUSED_ENTRY(200),
+ AVF_PTT_UNUSED_ENTRY(201),
+ AVF_PTT_UNUSED_ENTRY(202),
+ AVF_PTT_UNUSED_ENTRY(203),
+ AVF_PTT_UNUSED_ENTRY(204),
+ AVF_PTT_UNUSED_ENTRY(205),
+ AVF_PTT_UNUSED_ENTRY(206),
+ AVF_PTT_UNUSED_ENTRY(207),
+ AVF_PTT_UNUSED_ENTRY(208),
+ AVF_PTT_UNUSED_ENTRY(209),
+
+ AVF_PTT_UNUSED_ENTRY(210),
+ AVF_PTT_UNUSED_ENTRY(211),
+ AVF_PTT_UNUSED_ENTRY(212),
+ AVF_PTT_UNUSED_ENTRY(213),
+ AVF_PTT_UNUSED_ENTRY(214),
+ AVF_PTT_UNUSED_ENTRY(215),
+ AVF_PTT_UNUSED_ENTRY(216),
+ AVF_PTT_UNUSED_ENTRY(217),
+ AVF_PTT_UNUSED_ENTRY(218),
+ AVF_PTT_UNUSED_ENTRY(219),
+
+ AVF_PTT_UNUSED_ENTRY(220),
+ AVF_PTT_UNUSED_ENTRY(221),
+ AVF_PTT_UNUSED_ENTRY(222),
+ AVF_PTT_UNUSED_ENTRY(223),
+ AVF_PTT_UNUSED_ENTRY(224),
+ AVF_PTT_UNUSED_ENTRY(225),
+ AVF_PTT_UNUSED_ENTRY(226),
+ AVF_PTT_UNUSED_ENTRY(227),
+ AVF_PTT_UNUSED_ENTRY(228),
+ AVF_PTT_UNUSED_ENTRY(229),
+
+ AVF_PTT_UNUSED_ENTRY(230),
+ AVF_PTT_UNUSED_ENTRY(231),
+ AVF_PTT_UNUSED_ENTRY(232),
+ AVF_PTT_UNUSED_ENTRY(233),
+ AVF_PTT_UNUSED_ENTRY(234),
+ AVF_PTT_UNUSED_ENTRY(235),
+ AVF_PTT_UNUSED_ENTRY(236),
+ AVF_PTT_UNUSED_ENTRY(237),
+ AVF_PTT_UNUSED_ENTRY(238),
+ AVF_PTT_UNUSED_ENTRY(239),
+
+ AVF_PTT_UNUSED_ENTRY(240),
+ AVF_PTT_UNUSED_ENTRY(241),
+ AVF_PTT_UNUSED_ENTRY(242),
+ AVF_PTT_UNUSED_ENTRY(243),
+ AVF_PTT_UNUSED_ENTRY(244),
+ AVF_PTT_UNUSED_ENTRY(245),
+ AVF_PTT_UNUSED_ENTRY(246),
+ AVF_PTT_UNUSED_ENTRY(247),
+ AVF_PTT_UNUSED_ENTRY(248),
+ AVF_PTT_UNUSED_ENTRY(249),
+
+ AVF_PTT_UNUSED_ENTRY(250),
+ AVF_PTT_UNUSED_ENTRY(251),
+ AVF_PTT_UNUSED_ENTRY(252),
+ AVF_PTT_UNUSED_ENTRY(253),
+ AVF_PTT_UNUSED_ENTRY(254),
+ AVF_PTT_UNUSED_ENTRY(255)
+};
+
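+/* Illustrative sketch of the decode flow described above the table; a
+ * minimal "is this an IP packet" check would look like this
+ * (avf_example_* is a hypothetical name, the field names follow
+ * struct avf_rx_ptype_decoded):
+ */
+static inline bool avf_example_ptype_is_ip(u8 ptype)
+{
+	struct avf_rx_ptype_decoded decoded = avf_ptype_lookup[ptype];
+
+	if (!decoded.known)
+		return false;	/* row is an unused entry */
+	return decoded.outer_ip == AVF_RX_PTYPE_OUTER_IP;
+}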
+
+/**
+ * avf_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum avf_status_code avf_validate_mac_addr(u8 *mac_addr)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+
+ DEBUGFUNC("avf_validate_mac_addr");
+
+ /* Broadcast addresses ARE multicast addresses
+ * Make sure it is not a multicast address
+ * Reject the zero address
+ */
+ if (AVF_IS_MULTICAST(mac_addr) ||
+ (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+ status = AVF_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * avf_aq_rx_ctl_read_register - use FW to read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: ptr to register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to read the Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+enum avf_status_code avf_aq_rx_ctl_read_register(struct avf_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_rx_ctl_reg_read_write *cmd_resp =
+ (struct avf_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ enum avf_status_code status;
+
+ if (reg_val == NULL)
+ return AVF_ERR_PARAM;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_rx_ctl_reg_read);
+
+ cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == AVF_SUCCESS)
+ *reg_val = LE32_TO_CPU(cmd_resp->value);
+
+ return status;
+}
+
+/**
+ * avf_read_rx_ctl - read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ **/
+u32 avf_read_rx_ctl(struct avf_hw *hw, u32 reg_addr)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+ bool use_register;
+ int retry = 5;
+ u32 val = 0;
+
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == AVF_MAC_X722));
+ if (!use_register) {
+do_retry:
+ status = avf_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
+ if (hw->aq.asq_last_status == AVF_AQ_RC_EAGAIN && retry) {
+ avf_msec_delay(1);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ val = rd32(hw, reg_addr);
+
+ return val;
+}
+
+/**
+ * avf_aq_rx_ctl_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to write to an Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+enum avf_status_code avf_aq_rx_ctl_write_register(struct avf_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_rx_ctl_reg_read_write *cmd =
+ (struct avf_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_rx_ctl_reg_write);
+
+ cmd->address = CPU_TO_LE32(reg_addr);
+ cmd->value = CPU_TO_LE32(reg_val);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_write_rx_ctl - write to an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ **/
+void avf_write_rx_ctl(struct avf_hw *hw, u32 reg_addr, u32 reg_val)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+ bool use_register;
+ int retry = 5;
+
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == AVF_MAC_X722));
+ if (!use_register) {
+do_retry:
+ status = avf_aq_rx_ctl_write_register(hw, reg_addr,
+ reg_val, NULL);
+ if (hw->aq.asq_last_status == AVF_AQ_RC_EAGAIN && retry) {
+ avf_msec_delay(1);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ wr32(hw, reg_addr, reg_val);
+}
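
Together, avf_read_rx_ctl() and avf_write_rx_ctl() give a read-modify-write
path that uses the AQ on newer firmware (API >= 1.5) and falls back to direct
MMIO otherwise. A hedged sketch; AVF_EXAMPLE_REG and AVF_EXAMPLE_BIT are
placeholders for illustration, not real register definitions:

	u32 val = avf_read_rx_ctl(hw, AVF_EXAMPLE_REG);

	val |= AVF_EXAMPLE_BIT;	/* set the bit of interest */
	avf_write_rx_ctl(hw, AVF_EXAMPLE_REG, val);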
+
+/**
+ * avf_aq_set_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: new register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write the external PHY register.
+ **/
+enum avf_status_code avf_aq_set_phy_register(struct avf_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_phy_register_access *cmd =
+ (struct avf_aqc_phy_register_access *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_set_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = CPU_TO_LE32(reg_addr);
+ cmd->reg_value = CPU_TO_LE32(reg_val);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_aq_get_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: read register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the external PHY register.
+ **/
+enum avf_status_code avf_aq_get_phy_register(struct avf_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_phy_register_access *cmd =
+ (struct avf_aqc_phy_register_access *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_get_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_addres = dev_addr;
+ cmd->reg_address = CPU_TO_LE32(reg_addr);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (!status)
+ *reg_val = LE32_TO_CPU(cmd->reg_value);
+
+ return status;
+}
+
+/**
+ * avf_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. avf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+enum avf_status_code avf_aq_send_msg_to_pf(struct avf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum avf_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_asq_cmd_details details;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_send_msg_to_pf);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_SI);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(AVF_AQ_FLAG_BUF
+ | AVF_AQ_FLAG_RD));
+ if (msglen > AVF_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ if (!cmd_details) {
+ avf_memset(&details, 0, sizeof(details), AVF_NONDMA_MEM);
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = avf_asq_send_command(hw, (struct avf_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
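
As an example of this helper in use, the virtchnl handshake starts by
advertising the API version to the PF. A sketch assuming the standard
virtchnl.h definitions; error handling is elided:

	struct virtchnl_version_info ver = {
		.major = VIRTCHNL_VERSION_MAJOR,
		.minor = VIRTCHNL_VERSION_MINOR,
	};

	avf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_VERSION, AVF_SUCCESS,
			      (u8 *)&ver, sizeof(ver), NULL);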
+
+/**
+ * avf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void avf_parse_hw_config(struct avf_hw *hw,
+ struct virtchnl_vf_resource *msg)
+{
+ struct virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.iwarp = (msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
+ avf_memcpy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr,
+ ETH_ALEN,
+ AVF_NONDMA_TO_NONDMA);
+ avf_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ ETH_ALEN,
+ AVF_NONDMA_TO_NONDMA);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * avf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+enum avf_status_code avf_reset(struct avf_hw *hw)
+{
+ return avf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
+ AVF_SUCCESS, NULL, 0, NULL);
+}
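
A sketch of the sequence the comment above prescribes: request the reset,
then tear down the admin queue (re-initializing it afterwards is left to the
caller):

	enum avf_status_code status = avf_reset(hw);

	if (status == AVF_SUCCESS)
		status = avf_shutdown_adminq(hw);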
+
+/**
+ * avf_aq_set_arp_proxy_config
+ * @hw: pointer to the HW structure
+ * @proxy_config: pointer to proxy config command table struct
+ * @cmd_details: pointer to command details
+ *
+ * Set ARP offload parameters from pre-populated
+ * avf_aqc_arp_proxy_data struct
+ **/
+enum avf_status_code avf_aq_set_arp_proxy_config(struct avf_hw *hw,
+ struct avf_aqc_arp_proxy_data *proxy_config,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ enum avf_status_code status;
+
+ if (!proxy_config)
+ return AVF_ERR_PARAM;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_set_proxy_config);
+
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_RD);
+ desc.params.external.addr_high =
+ CPU_TO_LE32(AVF_HI_DWORD((u64)proxy_config));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(AVF_LO_DWORD((u64)proxy_config));
+ desc.datalen = CPU_TO_LE16(sizeof(struct avf_aqc_arp_proxy_data));
+
+ status = avf_asq_send_command(hw, &desc, proxy_config,
+ sizeof(struct avf_aqc_arp_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_aq_opc_set_ns_proxy_table_entry
+ * @hw: pointer to the HW structure
+ * @ns_proxy_table_entry: pointer to NS table entry command struct
+ * @cmd_details: pointer to command details
+ *
+ * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
+ * from pre-populated avf_aqc_ns_proxy_data struct
+ **/
+enum avf_status_code avf_aq_set_ns_proxy_table_entry(struct avf_hw *hw,
+ struct avf_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ enum avf_status_code status;
+
+ if (!ns_proxy_table_entry)
+ return AVF_ERR_PARAM;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_set_ns_proxy_table_entry);
+
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_RD);
+ desc.params.external.addr_high =
+ CPU_TO_LE32(AVF_HI_DWORD((u64)ns_proxy_table_entry));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(AVF_LO_DWORD((u64)ns_proxy_table_entry));
+ desc.datalen = CPU_TO_LE16(sizeof(struct avf_aqc_ns_proxy_data));
+
+ status = avf_asq_send_command(hw, &desc, ns_proxy_table_entry,
+ sizeof(struct avf_aqc_ns_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_aq_set_clear_wol_filter
+ * @hw: pointer to the hw struct
+ * @filter_index: index of filter to modify (0-7)
+ * @filter: buffer containing filter to be set
+ * @set_filter: true to set filter, false to clear filter
+ * @no_wol_tco: if true, pass through packets cannot cause wake-up;
+ *              if false, pass through packets may cause wake-up
+ * @filter_valid: true if the filter action is valid
+ * @no_wol_tco_valid: true if the no-WoL-in-TCO-traffic action is valid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear WoL filter for port attached to the PF
+ **/
+enum avf_status_code avf_aq_set_clear_wol_filter(struct avf_hw *hw,
+ u8 filter_index,
+ struct avf_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_set_wol_filter *cmd =
+ (struct avf_aqc_set_wol_filter *)&desc.params.raw;
+ enum avf_status_code status;
+ u16 cmd_flags = 0;
+ u16 valid_flags = 0;
+ u16 buff_len = 0;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_set_wol_filter);
+
+ if (filter_index >= AVF_AQC_MAX_NUM_WOL_FILTERS)
+ return AVF_ERR_PARAM;
+ cmd->filter_index = CPU_TO_LE16(filter_index);
+
+ if (set_filter) {
+ if (!filter)
+ return AVF_ERR_PARAM;
+
+ cmd_flags |= AVF_AQC_SET_WOL_FILTER;
+ cmd_flags |= AVF_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
+ }
+
+ if (no_wol_tco)
+ cmd_flags |= AVF_AQC_SET_WOL_FILTER_NO_TCO_WOL;
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+
+ if (filter_valid)
+ valid_flags |= AVF_AQC_SET_WOL_FILTER_ACTION_VALID;
+ if (no_wol_tco_valid)
+ valid_flags |= AVF_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
+ cmd->valid_flags = CPU_TO_LE16(valid_flags);
+
+ buff_len = sizeof(*filter);
+ desc.datalen = CPU_TO_LE16(buff_len);
+
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_RD);
+
+ cmd->address_high = CPU_TO_LE32(AVF_HI_DWORD((u64)filter));
+ cmd->address_low = CPU_TO_LE32(AVF_LO_DWORD((u64)filter));
+
+ status = avf_asq_send_command(hw, &desc, filter,
+ buff_len, cmd_details);
+
+ return status;
+}
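
For example, clearing filter slot 0 while marking only the filter action as
valid might look like the sketch below; passing a zeroed buffer rather than
NULL sidesteps the unconditional buffer flags set above (the slot choice is
an assumption for illustration):

	struct avf_aqc_set_wol_filter_data data = { 0 };
	enum avf_status_code status;

	status = avf_aq_set_clear_wol_filter(hw, 0, &data,
					     false, /* clear, don't set */
					     false, /* leave no_wol_tco alone */
					     true,  /* filter action valid */
					     false, /* tco action not valid */
					     NULL);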
+
+/**
+ * avf_aq_get_wake_event_reason
+ * @hw: pointer to the hw struct
+ * @wake_reason: return value, index of matching filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the reason for a wake-up event
+ **/
+enum avf_status_code avf_aq_get_wake_event_reason(struct avf_hw *hw,
+ u16 *wake_reason,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_get_wake_reason_completion *resp =
+ (struct avf_aqc_get_wake_reason_completion *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc, avf_aqc_opc_get_wake_reason);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == AVF_SUCCESS)
+ *wake_reason = LE16_TO_CPU(resp->wake_reason);
+
+ return status;
+}
+
+/**
+ * avf_aq_clear_all_wol_filters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Clear all WoL filters for the port attached to the PF
+ **/
+enum avf_status_code avf_aq_clear_all_wol_filters(struct avf_hw *hw,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_clear_all_wol_filters);
+
+ status = avf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_aq_write_ddp - Write dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum avf_status_code
+avf_aq_write_ddp(struct avf_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_write_personalization_profile *cmd =
+ (struct avf_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct avf_aqc_write_ddp_resp *resp;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_write_personalization_profile);
+
+ desc.flags |= CPU_TO_LE16(AVF_AQ_FLAG_BUF | AVF_AQ_FLAG_RD);
+ if (buff_size > AVF_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->profile_track_id = CPU_TO_LE32(track_id);
+
+ status = avf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct avf_aqc_write_ddp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * avf_aq_get_ddp_list - Read dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @flags: AdminQ command flags
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum avf_status_code
+avf_aq_get_ddp_list(struct avf_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct avf_asq_cmd_details *cmd_details)
+{
+ struct avf_aq_desc desc;
+ struct avf_aqc_get_applied_profiles *cmd =
+ (struct avf_aqc_get_applied_profiles *)&desc.params.raw;
+ enum avf_status_code status;
+
+ avf_fill_default_direct_cmd_desc(&desc,
+ avf_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_BUF);
+ if (buff_size > AVF_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->flags = flags;
+
+ status = avf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * avf_find_segment_in_package
+ * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_AVF)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct avf_generic_seg_header *
+avf_find_segment_in_package(u32 segment_type,
+ struct avf_package_header *pkg_hdr)
+{
+ struct avf_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct avf_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
+
+/* Get section table in profile: skip past the device table and the
+ * variable-length table that follows it (its first u32 is its length)
+ */
+#define AVF_SECTION_TABLE(profile, sec_tbl) \
+ do { \
+ struct avf_profile_segment *p = (profile); \
+ u32 count; \
+ u32 *nvm; \
+ count = p->device_table_count; \
+ nvm = (u32 *)&p->device_table[count]; \
+ sec_tbl = (struct avf_section_table *)&nvm[nvm[0] + 1]; \
+ } while (0)
+
+/* Get section header in profile */
+#define AVF_SECTION_HEADER(profile, offset) \
+ (struct avf_profile_section_header *)((u8 *)(profile) + (offset))
+
+/**
+ * avf_find_section_in_profile
+ * @section_type: the section type to search for (e.g., SECTION_TYPE_NOTE)
+ * @profile: pointer to the avf segment header to be searched
+ *
+ * This function searches an AVF segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it will
+ * return NULL.
+ **/
+struct avf_profile_section_header *
+avf_find_section_in_profile(u32 section_type,
+ struct avf_profile_segment *profile)
+{
+ struct avf_profile_section_header *sec;
+ struct avf_section_table *sec_tbl;
+ u32 sec_off;
+ u32 i;
+
+ if (profile->header.type != SEGMENT_TYPE_AVF)
+ return NULL;
+
+ AVF_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = AVF_SECTION_HEADER(profile, sec_off);
+ if (sec->section.type == section_type)
+ return sec;
+ }
+
+ return NULL;
+}
+
+/**
+ * avf_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+STATIC enum avf_status_code
+avf_ddp_exec_aq_section(struct avf_hw *hw,
+ struct avf_profile_aq_section *aq)
+{
+ enum avf_status_code status;
+ struct avf_aq_desc desc;
+ u8 *msg = NULL;
+ u16 msglen;
+
+ avf_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ desc.flags |= CPU_TO_LE16(aq->flags);
+ avf_memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw),
+ AVF_NONDMA_TO_NONDMA);
+
+ msglen = aq->datalen;
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(AVF_AQ_FLAG_BUF |
+ AVF_AQ_FLAG_RD));
+ if (msglen > AVF_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)AVF_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ msg = &aq->data[0];
+ }
+
+ status = avf_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+ if (status != AVF_SUCCESS) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "unable to exec DDP AQ opcode %u, error %d\n",
+ aq->opcode, status);
+ return status;
+ }
+
+ /* copy returned desc to aq_buf */
+ avf_memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw),
+ AVF_NONDMA_TO_NONDMA);
+
+ return AVF_SUCCESS;
+}
+
+/**
+ * avf_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag if the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+STATIC enum avf_status_code
+avf_validate_profile(struct avf_hw *hw, struct avf_profile_segment *profile,
+ u32 track_id, bool rollback)
+{
+ struct avf_profile_section_header *sec = NULL;
+ enum avf_status_code status = AVF_SUCCESS;
+ struct avf_section_table *sec_tbl;
+ u32 vendor_dev_id;
+ u32 dev_cnt;
+ u32 sec_off;
+ u32 i;
+
+ if (track_id == AVF_DDP_TRACKID_INVALID) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE, "Invalid track_id\n");
+ return AVF_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == AVF_INTEL_VENDOR_ID &&
+ hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (dev_cnt && (i == dev_cnt)) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Device doesn't support DDP\n");
+ return AVF_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ AVF_SECTION_TABLE(profile, sec_tbl);
+
+ /* Validate sections types */
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = AVF_SECTION_HEADER(profile, sec_off);
+ if (rollback) {
+ if (sec->section.type == SECTION_TYPE_MMIO ||
+ sec->section.type == SECTION_TYPE_AQ ||
+ sec->section.type == SECTION_TYPE_RB_AQ) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Not a roll-back package\n");
+ return AVF_NOT_SUPPORTED;
+ }
+ } else {
+ if (sec->section.type == SECTION_TYPE_RB_AQ ||
+ sec->section.type == SECTION_TYPE_RB_MMIO) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Not an original package\n");
+ return AVF_NOT_SUPPORTED;
+ }
+ }
+ }
+
+ return status;
+}
+
+/**
+ * avf_write_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ */
+enum avf_status_code
+avf_write_profile(struct avf_hw *hw, struct avf_profile_segment *profile,
+ u32 track_id)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+ struct avf_section_table *sec_tbl;
+ struct avf_profile_section_header *sec = NULL;
+ struct avf_profile_aq_section *ddp_aq;
+ u32 section_size = 0;
+ u32 offset = 0, info = 0;
+ u32 sec_off;
+ u32 i;
+
+ status = avf_validate_profile(hw, profile, track_id, false);
+ if (status)
+ return status;
+
+ AVF_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = AVF_SECTION_HEADER(profile, sec_off);
+ /* Process generic admin command */
+ if (sec->section.type == SECTION_TYPE_AQ) {
+ ddp_aq = (struct avf_profile_aq_section *)&sec[1];
+ status = avf_ddp_exec_aq_section(hw, ddp_aq);
+ if (status) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Failed to execute aq: section %d, opcode %u\n",
+ i, ddp_aq->opcode);
+ break;
+ }
+ sec->section.type = SECTION_TYPE_RB_AQ;
+ }
+
+ /* Skip any non-mmio sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct avf_profile_section_header);
+
+ /* Write MMIO section */
+ status = avf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * avf_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+enum avf_status_code
+avf_rollback_profile(struct avf_hw *hw, struct avf_profile_segment *profile,
+ u32 track_id)
+{
+ struct avf_profile_section_header *sec = NULL;
+ enum avf_status_code status = AVF_SUCCESS;
+ struct avf_section_table *sec_tbl;
+ u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ u32 sec_off;
+ int i;
+
+ status = avf_validate_profile(hw, profile, track_id, true);
+ if (status)
+ return status;
+
+ AVF_SECTION_TABLE(profile, sec_tbl);
+
+ /* For rollback write sections in reverse */
+ for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = AVF_SECTION_HEADER(profile, sec_off);
+
+ /* Skip any non-rollback sections */
+ if (sec->section.type != SECTION_TYPE_RB_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct avf_profile_section_header);
+
+ /* Write roll-back MMIO section */
+ status = avf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ avf_debug(hw, AVF_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * avf_add_pinfo_to_list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+enum avf_status_code
+avf_add_pinfo_to_list(struct avf_hw *hw,
+ struct avf_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ enum avf_status_code status = AVF_SUCCESS;
+ struct avf_profile_section_header *sec = NULL;
+ struct avf_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+
+ sec = (struct avf_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct avf_profile_section_header) +
+ sizeof(struct avf_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct avf_profile_section_header);
+ sec->section.size = sizeof(struct avf_profile_info);
+ pinfo = (struct avf_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = AVF_DDP_ADD_TRACKID;
+ avf_memcpy(pinfo->name, profile->name, AVF_DDP_NAME_SIZE,
+ AVF_NONDMA_TO_NONDMA);
+
+ status = avf_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
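
Taken together, the DDP entry points above compose into a load flow: locate
the AVF segment inside the package image, validate and write it, then
register it in the loaded-profile list. A hedged sketch; pkg_hdr is assumed
to point at a package already read into memory and track_id to come from the
package metadata:

	struct avf_profile_segment *profile;
	u8 info_sec[sizeof(struct avf_profile_section_header) +
		    sizeof(struct avf_profile_info)];
	enum avf_status_code status;

	profile = (struct avf_profile_segment *)
		avf_find_segment_in_package(SEGMENT_TYPE_AVF, pkg_hdr);
	if (!profile)
		return AVF_ERR_PARAM;

	status = avf_write_profile(hw, profile, track_id);
	if (!status)
		status = avf_add_pinfo_to_list(hw, profile, info_sec,
					       track_id);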
diff --git a/drivers/net/avf/base/avf_devids.h b/drivers/net/avf/base/avf_devids.h
new file mode 100644
index 00000000..7d9fed25
--- /dev/null
+++ b/drivers/net/avf/base/avf_devids.h
@@ -0,0 +1,43 @@
+/*******************************************************************************
+
+Copyright (c) 2017, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_DEVIDS_H_
+#define _AVF_DEVIDS_H_
+
+/* Vendor ID */
+#define AVF_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define AVF_DEV_ID_ADAPTIVE_VF 0x1889
+
+#endif /* _AVF_DEVIDS_H_ */
diff --git a/drivers/net/avf/base/avf_hmc.h b/drivers/net/avf/base/avf_hmc.h
new file mode 100644
index 00000000..b9b7b5be
--- /dev/null
+++ b/drivers/net/avf/base/avf_hmc.h
@@ -0,0 +1,245 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_HMC_H_
+#define _AVF_HMC_H_
+
+#define AVF_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct avf_hw;
+
+#define AVF_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define AVF_HMC_PD_CNT_IN_SD 512
+#define AVF_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define AVF_HMC_PAGED_BP_SIZE 4096
+#define AVF_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define AVF_FIRST_VF_FPM_ID 16
+
+struct avf_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum avf_sd_entry_type {
+ AVF_SD_TYPE_INVALID = 0,
+ AVF_SD_TYPE_PAGED = 1,
+ AVF_SD_TYPE_DIRECT = 2
+};
+
+struct avf_hmc_bp {
+ enum avf_sd_entry_type entry_type;
+ struct avf_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct avf_hmc_pd_entry {
+ struct avf_hmc_bp bp;
+ u32 sd_index;
+ bool rsrc_pg;
+ bool valid;
+};
+
+struct avf_hmc_pd_table {
+ struct avf_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct avf_hmc_pd_entry *pd_entry; /* [512] for sw bookkeeping */
+ struct avf_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct avf_hmc_sd_entry {
+ enum avf_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct avf_hmc_pd_table pd_table;
+ struct avf_hmc_bp bp;
+ } u;
+};
+
+struct avf_hmc_sd_table {
+ struct avf_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct avf_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct avf_hmc_info {
+ u32 signature;
+ /* equals the PCI function number for a PF; dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct avf_hmc_obj_info *hmc_obj;
+ struct avf_virt_mem hmc_obj_virt_mem;
+ struct avf_hmc_sd_table sd_table;
+};
+
+#define AVF_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define AVF_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define AVF_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define AVF_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define AVF_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define AVF_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * AVF_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define AVF_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(AVF_HI_DWORD(pa)); \
+ val2 = (u32)(pa) | (AVF_HMC_MAX_BP_COUNT << \
+ AVF_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == AVF_SD_TYPE_PAGED) ? 0 : 1) << \
+ AVF_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ BIT(AVF_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(AVF_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), AVF_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), AVF_PFHMC_SDDATALOW, val2); \
+ wr32((hw), AVF_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * AVF_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define AVF_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (AVF_HMC_MAX_BP_COUNT << \
+ AVF_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == AVF_SD_TYPE_PAGED) ? 0 : 1) << \
+ AVF_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(AVF_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), AVF_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), AVF_PFHMC_SDDATALOW, val2); \
+ wr32((hw), AVF_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * AVF_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define AVF_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), AVF_PFHMC_PDINV, \
+ (((sd_idx) << AVF_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << AVF_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * AVF_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by avf_hmc_rsrc_type.
+ **/
+#define AVF_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / AVF_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / AVF_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
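
A quick worked example of the index math above: with AVF_HMC_DIRECT_BP_SIZE
equal to 2MB, an object span running from FPM address 0x1F0000 to 0x210000
crosses one 2MB boundary, so sd_idx = 0x1F0000 / 0x200000 = 0 and
sd_limit = (0x20FFFF / 0x200000) + 1 = 2: segment descriptors 0 and 1 both
back the range, and a caller iterating for (i = sd_idx; i < sd_limit; i++)
touches exactly those two.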
+
+/**
+ * AVF_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by avf_hmc_rsrc_type.
+ **/
+#define AVF_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / AVF_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / AVF_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
+
+enum avf_status_code avf_add_sd_table_entry(struct avf_hw *hw,
+ struct avf_hmc_info *hmc_info,
+ u32 sd_index,
+ enum avf_sd_entry_type type,
+ u64 direct_mode_sz);
+
+enum avf_status_code avf_add_pd_table_entry(struct avf_hw *hw,
+ struct avf_hmc_info *hmc_info,
+ u32 pd_index,
+ struct avf_dma_mem *rsrc_pg);
+enum avf_status_code avf_remove_pd_bp(struct avf_hw *hw,
+ struct avf_hmc_info *hmc_info,
+ u32 idx);
+enum avf_status_code avf_prep_remove_sd_bp(struct avf_hmc_info *hmc_info,
+ u32 idx);
+enum avf_status_code avf_remove_sd_bp_new(struct avf_hw *hw,
+ struct avf_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+enum avf_status_code avf_prep_remove_pd_page(struct avf_hmc_info *hmc_info,
+ u32 idx);
+enum avf_status_code avf_remove_pd_page_new(struct avf_hw *hw,
+ struct avf_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _AVF_HMC_H_ */
diff --git a/drivers/net/avf/base/avf_lan_hmc.h b/drivers/net/avf/base/avf_lan_hmc.h
new file mode 100644
index 00000000..48805d89
--- /dev/null
+++ b/drivers/net/avf/base/avf_lan_hmc.h
@@ -0,0 +1,200 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_LAN_HMC_H_
+#define _AVF_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct avf_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct avf_hmc_obj_rxq {
+ u16 head;
+ u16 cpuid; /* bigger than needed, see above for reason */
+ u64 base;
+ u16 qlen;
+#define AVF_RXQ_CTX_DBUFF_SHIFT 7
+ u16 dbuff; /* bigger than needed, see above for reason */
+#define AVF_RXQ_CTX_HBUFF_SHIFT 6
+ u16 hbuff; /* bigger than needed, see above for reason */
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u32 rxmax; /* bigger than needed, see above for reason */
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
+};
+
+/* Tx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct avf_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u8 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum avf_hmc_obj_rx_hsplit_0 {
+ AVF_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ AVF_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct avf_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct avf_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum avf_hmc_lan_object_size {
+ AVF_HMC_LAN_OBJ_SZ_8 = 0x3,
+ AVF_HMC_LAN_OBJ_SZ_16 = 0x4,
+ AVF_HMC_LAN_OBJ_SZ_32 = 0x5,
+ AVF_HMC_LAN_OBJ_SZ_64 = 0x6,
+ AVF_HMC_LAN_OBJ_SZ_128 = 0x7,
+ AVF_HMC_LAN_OBJ_SZ_256 = 0x8,
+ AVF_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define AVF_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define AVF_HMC_OBJ_SIZE_TXQ 128
+#define AVF_HMC_OBJ_SIZE_RXQ 32
+#define AVF_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define AVF_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum avf_hmc_lan_rsrc_type {
+ AVF_HMC_LAN_FULL = 0,
+ AVF_HMC_LAN_TX = 1,
+ AVF_HMC_LAN_RX = 2,
+ AVF_HMC_FCOE_CTX = 3,
+ AVF_HMC_FCOE_FILT = 4,
+ AVF_HMC_LAN_MAX = 5
+};
+
+enum avf_hmc_model {
+ AVF_HMC_MODEL_DIRECT_PREFERRED = 0,
+ AVF_HMC_MODEL_DIRECT_ONLY = 1,
+ AVF_HMC_MODEL_PAGED_ONLY = 2,
+ AVF_HMC_MODEL_UNKNOWN,
+};
+
+struct avf_hmc_lan_create_obj_info {
+ struct avf_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum avf_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct avf_hmc_lan_delete_obj_info {
+ struct avf_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+enum avf_status_code avf_init_lan_hmc(struct avf_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+enum avf_status_code avf_configure_lan_hmc(struct avf_hw *hw,
+ enum avf_hmc_model model);
+enum avf_status_code avf_shutdown_lan_hmc(struct avf_hw *hw);
+
+u64 avf_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num);
+enum avf_status_code avf_get_lan_tx_queue_context(struct avf_hw *hw,
+ u16 queue,
+ struct avf_hmc_obj_txq *s);
+enum avf_status_code avf_clear_lan_tx_queue_context(struct avf_hw *hw,
+ u16 queue);
+enum avf_status_code avf_set_lan_tx_queue_context(struct avf_hw *hw,
+ u16 queue,
+ struct avf_hmc_obj_txq *s);
+enum avf_status_code avf_get_lan_rx_queue_context(struct avf_hw *hw,
+ u16 queue,
+ struct avf_hmc_obj_rxq *s);
+enum avf_status_code avf_clear_lan_rx_queue_context(struct avf_hw *hw,
+ u16 queue);
+enum avf_status_code avf_set_lan_rx_queue_context(struct avf_hw *hw,
+ u16 queue,
+ struct avf_hmc_obj_rxq *s);
+enum avf_status_code avf_create_lan_hmc_object(struct avf_hw *hw,
+ struct avf_hmc_lan_create_obj_info *info);
+enum avf_status_code avf_delete_lan_hmc_object(struct avf_hw *hw,
+ struct avf_hmc_lan_delete_obj_info *info);
+
+#endif /* _AVF_LAN_HMC_H_ */
diff --git a/drivers/net/avf/base/avf_osdep.h b/drivers/net/avf/base/avf_osdep.h
new file mode 100644
index 00000000..9ef45968
--- /dev/null
+++ b/drivers/net/avf/base/avf_osdep.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_OSDEP_H_
+#define _AVF_OSDEP_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#include "../avf_log.h"
+
+#define INLINE inline
+#define STATIC static
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+
+#define __iomem
+#define hw_dbg(hw, S, A...) do {} while (0)
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+#define lower_32_bits(n) ((u32)(n))
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#ifndef __le16
+#define __le16 uint16_t
+#endif
+#ifndef __le32
+#define __le32 uint32_t
+#endif
+#ifndef __le64
+#define __le64 uint64_t
+#endif
+#ifndef __be16
+#define __be16 uint16_t
+#endif
+#ifndef __be32
+#define __be32 uint32_t
+#endif
+#ifndef __be64
+#define __be64 uint64_t
+#endif
+
+#define FALSE 0
+#define TRUE 1
+#define false 0
+#define true 1
+
+#define min(a,b) RTE_MIN(a,b)
+#define max(a,b) RTE_MAX(a,b)
+
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define ASSERT(x) do { if (!(x)) rte_panic("AVF: %s", #x); } while (0)
+
+#define DEBUGOUT(S) PMD_DRV_LOG_RAW(DEBUG, S)
+#define DEBUGOUT2(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+
+#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
+#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
+#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
+#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
+#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
+#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
+
+#define cpu_to_le16(o) rte_cpu_to_le_16(o)
+#define cpu_to_le32(s) rte_cpu_to_le_32(s)
+#define cpu_to_le64(h) rte_cpu_to_le_64(h)
+#define le16_to_cpu(a) rte_le_to_cpu_16(a)
+#define le32_to_cpu(c) rte_le_to_cpu_32(c)
+#define le64_to_cpu(k) rte_le_to_cpu_64(k)
+
+#define avf_memset(a, b, c, d) memset((a), (b), (c))
+#define avf_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
+
+#define avf_usec_delay(x) rte_delay_us(x)
+#define avf_msec_delay(x) rte_delay_us(1000*(x))
+
+#define AVF_PCI_REG(reg) rte_read32(reg)
+#define AVF_PCI_REG_ADDR(a, reg) \
+ ((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
+
+#define AVF_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+#define AVF_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
+
+static inline
+uint32_t avf_read_addr(volatile void *addr)
+{
+ return rte_le_to_cpu_32(AVF_PCI_REG(addr));
+}
+
+#define AVF_READ_REG(hw, reg) \
+ avf_read_addr(AVF_PCI_REG_ADDR((hw), (reg)))
+#define AVF_WRITE_REG(hw, reg, value) \
+ AVF_PCI_REG_WRITE(AVF_PCI_REG_ADDR((hw), (reg)), (value))
+#define AVF_WRITE_FLUSH(a) \
+ AVF_READ_REG(a, AVFGEN_RSTAT)
+
+#define rd32(a, reg) avf_read_addr(AVF_PCI_REG_ADDR((a), (reg)))
+#define wr32(a, reg, value) \
+ AVF_PCI_REG_WRITE(AVF_PCI_REG_ADDR((a), (reg)), (value))
+
+#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
+
+#define avf_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ PMD_DRV_LOG_RAW(DEBUG, "avf %02x.%x " s, \
+ (h)->bus.device, (h)->bus.func, \
+ ##__VA_ARGS__); \
+} while (0)
+
+/* memory allocation tracking */
+struct avf_dma_mem {
+ void *va;
+ u64 pa;
+ u32 size;
+ const void *zone;
+} __attribute__((packed));
+
+struct avf_virt_mem {
+ void *va;
+ u32 size;
+} __attribute__((packed));
+
+/* SW spinlock */
+struct avf_spinlock {
+ rte_spinlock_t spinlock;
+};
+
+#define avf_allocate_dma_mem(h, m, unused, s, a) \
+ avf_allocate_dma_mem_d(h, m, s, a)
+#define avf_free_dma_mem(h, m) avf_free_dma_mem_d(h, m)
+
+#define avf_allocate_virt_mem(h, m, s) avf_allocate_virt_mem_d(h, m, s)
+#define avf_free_virt_mem(h, m) avf_free_virt_mem_d(h, m)
+
+static inline void
+avf_init_spinlock_d(struct avf_spinlock *sp)
+{
+ rte_spinlock_init(&sp->spinlock);
+}
+
+static inline void
+avf_acquire_spinlock_d(struct avf_spinlock *sp)
+{
+ rte_spinlock_lock(&sp->spinlock);
+}
+
+static inline void
+avf_release_spinlock_d(struct avf_spinlock *sp)
+{
+ rte_spinlock_unlock(&sp->spinlock);
+}
+
+static inline void
+avf_destroy_spinlock_d(__rte_unused struct avf_spinlock *sp)
+{
+}
+
+#define avf_init_spinlock(_sp) avf_init_spinlock_d(_sp)
+#define avf_acquire_spinlock(_sp) avf_acquire_spinlock_d(_sp)
+#define avf_release_spinlock(_sp) avf_release_spinlock_d(_sp)
+#define avf_destroy_spinlock(_sp) avf_destroy_spinlock_d(_sp)
+
+#endif /* _AVF_OSDEP_H_ */
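
With the definitions above, the shared code's register accessors expand
directly to rte_io operations on the mapped BAR. A small sketch (assumes hw
was initialized with hw_addr pointing at the device's BAR0 mapping):

	wr32(hw, AVF_ARQT1, 0);       /* tail update via rte_write32() */
	(void)rd32(hw, AVFGEN_RSTAT); /* read back to flush posted writes */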
diff --git a/drivers/net/avf/base/avf_prototype.h b/drivers/net/avf/base/avf_prototype.h
new file mode 100644
index 00000000..de031dc6
--- /dev/null
+++ b/drivers/net/avf/base/avf_prototype.h
@@ -0,0 +1,206 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_PROTOTYPE_H_
+#define _AVF_PROTOTYPE_H_
+
+#include "avf_type.h"
+#include "avf_alloc.h"
+#include "virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These are
+ * mostly because they are needed even before the init
+ * has happened and will assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+enum avf_status_code avf_init_adminq(struct avf_hw *hw);
+enum avf_status_code avf_shutdown_adminq(struct avf_hw *hw);
+enum avf_status_code avf_init_asq(struct avf_hw *hw);
+enum avf_status_code avf_init_arq(struct avf_hw *hw);
+enum avf_status_code avf_alloc_adminq_asq_ring(struct avf_hw *hw);
+enum avf_status_code avf_alloc_adminq_arq_ring(struct avf_hw *hw);
+enum avf_status_code avf_shutdown_asq(struct avf_hw *hw);
+enum avf_status_code avf_shutdown_arq(struct avf_hw *hw);
+u16 avf_clean_asq(struct avf_hw *hw);
+void avf_free_adminq_asq(struct avf_hw *hw);
+void avf_free_adminq_arq(struct avf_hw *hw);
+enum avf_status_code avf_validate_mac_addr(u8 *mac_addr);
+void avf_adminq_init_ring_data(struct avf_hw *hw);
+enum avf_status_code avf_clean_arq_element(struct avf_hw *hw,
+ struct avf_arq_event_info *e,
+ u16 *events_pending);
+enum avf_status_code avf_asq_send_command(struct avf_hw *hw,
+ struct avf_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct avf_asq_cmd_details *cmd_details);
+bool avf_asq_done(struct avf_hw *hw);
+
+/* debug function for adminq */
+void avf_debug_aq(struct avf_hw *hw, enum avf_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void avf_idle_aq(struct avf_hw *hw);
+bool avf_check_asq_alive(struct avf_hw *hw);
+enum avf_status_code avf_aq_queue_shutdown(struct avf_hw *hw, bool unloading);
+
+enum avf_status_code avf_aq_get_rss_lut(struct avf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum avf_status_code avf_aq_set_rss_lut(struct avf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum avf_status_code avf_aq_get_rss_key(struct avf_hw *hw,
+ u16 seid,
+ struct avf_aqc_get_set_rss_key_data *key);
+enum avf_status_code avf_aq_set_rss_key(struct avf_hw *hw,
+ u16 seid,
+ struct avf_aqc_get_set_rss_key_data *key);
+const char *avf_aq_str(struct avf_hw *hw, enum avf_admin_queue_err aq_err);
+const char *avf_stat_str(struct avf_hw *hw, enum avf_status_code stat_err);
+
+
+enum avf_status_code avf_set_mac_type(struct avf_hw *hw);
+
+extern struct avf_rx_ptype_decoded avf_ptype_lookup[];
+
+STATIC INLINE struct avf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return avf_ptype_lookup[ptype];
+}
+
+/* prototype for functions used for SW spinlocks */
+void avf_init_spinlock(struct avf_spinlock *sp);
+void avf_acquire_spinlock(struct avf_spinlock *sp);
+void avf_release_spinlock(struct avf_spinlock *sp);
+void avf_destroy_spinlock(struct avf_spinlock *sp);
+
+/* avf_common for VF drivers */
+void avf_parse_hw_config(struct avf_hw *hw,
+ struct virtchnl_vf_resource *msg);
+enum avf_status_code avf_reset(struct avf_hw *hw);
+enum avf_status_code avf_aq_send_msg_to_pf(struct avf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ enum avf_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_set_filter_control(struct avf_hw *hw,
+ struct avf_filter_control_settings *settings);
+enum avf_status_code avf_aq_add_rem_control_packet_filter(struct avf_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct avf_control_filter_stats *stats,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_debug_dump(struct avf_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct avf_asq_cmd_details *cmd_details);
+void avf_add_filter_to_drop_tx_flow_control_frames(struct avf_hw *hw,
+ u16 vsi_seid);
+enum avf_status_code avf_aq_rx_ctl_read_register(struct avf_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct avf_asq_cmd_details *cmd_details);
+u32 avf_read_rx_ctl(struct avf_hw *hw, u32 reg_addr);
+enum avf_status_code avf_aq_rx_ctl_write_register(struct avf_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct avf_asq_cmd_details *cmd_details);
+void avf_write_rx_ctl(struct avf_hw *hw, u32 reg_addr, u32 reg_val);
+enum avf_status_code avf_aq_set_phy_register(struct avf_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_get_phy_register(struct avf_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct avf_asq_cmd_details *cmd_details);
+
+enum avf_status_code avf_aq_set_arp_proxy_config(struct avf_hw *hw,
+ struct avf_aqc_arp_proxy_data *proxy_config,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_set_ns_proxy_table_entry(struct avf_hw *hw,
+ struct avf_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_set_clear_wol_filter(struct avf_hw *hw,
+ u8 filter_index,
+ struct avf_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_get_wake_event_reason(struct avf_hw *hw,
+ u16 *wake_reason,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_clear_all_wol_filters(struct avf_hw *hw,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_read_phy_register_clause22(struct avf_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+enum avf_status_code avf_write_phy_register_clause22(struct avf_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+enum avf_status_code avf_read_phy_register_clause45(struct avf_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum avf_status_code avf_write_phy_register_clause45(struct avf_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+enum avf_status_code avf_read_phy_register(struct avf_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum avf_status_code avf_write_phy_register(struct avf_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+u8 avf_get_phy_address(struct avf_hw *hw, u8 dev_num);
+enum avf_status_code avf_blink_phy_link_led(struct avf_hw *hw,
+ u32 time, u32 interval);
+enum avf_status_code avf_aq_write_ddp(struct avf_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct avf_asq_cmd_details *cmd_details);
+enum avf_status_code avf_aq_get_ddp_list(struct avf_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct avf_asq_cmd_details *cmd_details);
+struct avf_generic_seg_header *
+avf_find_segment_in_package(u32 segment_type,
+ struct avf_package_header *pkg_header);
+struct avf_profile_section_header *
+avf_find_section_in_profile(u32 section_type,
+ struct avf_profile_segment *profile);
+enum avf_status_code
+avf_write_profile(struct avf_hw *hw, struct avf_profile_segment *avf_seg,
+ u32 track_id);
+enum avf_status_code
+avf_rollback_profile(struct avf_hw *hw, struct avf_profile_segment *avf_seg,
+ u32 track_id);
+enum avf_status_code
+avf_add_pinfo_to_list(struct avf_hw *hw,
+ struct avf_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
+#endif /* _AVF_PROTOTYPE_H_ */
diff --git a/drivers/net/avf/base/avf_register.h b/drivers/net/avf/base/avf_register.h
new file mode 100644
index 00000000..ba5a9f3f
--- /dev/null
+++ b/drivers/net/avf/base/avf_register.h
@@ -0,0 +1,346 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_REGISTER_H_
+#define _AVF_REGISTER_H_
+
+
+#define AVFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
+#define AVFMSIX_PBA1_MAX_INDEX 19
+#define AVFMSIX_PBA1_PENBIT_SHIFT 0
+#define AVFMSIX_PBA1_PENBIT_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_PBA1_PENBIT_SHIFT)
+#define AVFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define AVFMSIX_TADD1_MAX_INDEX 639
+#define AVFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define AVFMSIX_TADD1_MSIXTADD10_MASK AVF_MASK(0x3, AVFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define AVFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define AVFMSIX_TADD1_MSIXTADD_MASK AVF_MASK(0x3FFFFFFF, AVFMSIX_TADD1_MSIXTADD_SHIFT)
+#define AVFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define AVFMSIX_TMSG1_MAX_INDEX 639
+#define AVFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define AVFMSIX_TMSG1_MSIXTMSG_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define AVFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define AVFMSIX_TUADD1_MAX_INDEX 639
+#define AVFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define AVFMSIX_TUADD1_MSIXTUADD_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define AVFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define AVFMSIX_TVCTRL1_MAX_INDEX 639
+#define AVFMSIX_TVCTRL1_MASK_SHIFT 0
+#define AVFMSIX_TVCTRL1_MASK_MASK AVF_MASK(0x1, AVFMSIX_TVCTRL1_MASK_SHIFT)
+#define AVF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define AVF_ARQBAH1_ARQBAH_SHIFT 0
+#define AVF_ARQBAH1_ARQBAH_MASK AVF_MASK(0xFFFFFFFF, AVF_ARQBAH1_ARQBAH_SHIFT)
+#define AVF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define AVF_ARQBAL1_ARQBAL_SHIFT 0
+#define AVF_ARQBAL1_ARQBAL_MASK AVF_MASK(0xFFFFFFFF, AVF_ARQBAL1_ARQBAL_SHIFT)
+#define AVF_ARQH1 0x00007400 /* Reset: EMPR */
+#define AVF_ARQH1_ARQH_SHIFT 0
+#define AVF_ARQH1_ARQH_MASK AVF_MASK(0x3FF, AVF_ARQH1_ARQH_SHIFT)
+#define AVF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define AVF_ARQLEN1_ARQLEN_SHIFT 0
+#define AVF_ARQLEN1_ARQLEN_MASK AVF_MASK(0x3FF, AVF_ARQLEN1_ARQLEN_SHIFT)
+#define AVF_ARQLEN1_ARQVFE_SHIFT 28
+#define AVF_ARQLEN1_ARQVFE_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQVFE_SHIFT)
+#define AVF_ARQLEN1_ARQOVFL_SHIFT 29
+#define AVF_ARQLEN1_ARQOVFL_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQOVFL_SHIFT)
+#define AVF_ARQLEN1_ARQCRIT_SHIFT 30
+#define AVF_ARQLEN1_ARQCRIT_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQCRIT_SHIFT)
+#define AVF_ARQLEN1_ARQENABLE_SHIFT 31
+#define AVF_ARQLEN1_ARQENABLE_MASK AVF_MASK(0x1, AVF_ARQLEN1_ARQENABLE_SHIFT)
+#define AVF_ARQT1 0x00007000 /* Reset: EMPR */
+#define AVF_ARQT1_ARQT_SHIFT 0
+#define AVF_ARQT1_ARQT_MASK AVF_MASK(0x3FF, AVF_ARQT1_ARQT_SHIFT)
+#define AVF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define AVF_ATQBAH1_ATQBAH_SHIFT 0
+#define AVF_ATQBAH1_ATQBAH_MASK AVF_MASK(0xFFFFFFFF, AVF_ATQBAH1_ATQBAH_SHIFT)
+#define AVF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define AVF_ATQBAL1_ATQBAL_SHIFT 0
+#define AVF_ATQBAL1_ATQBAL_MASK AVF_MASK(0xFFFFFFFF, AVF_ATQBAL1_ATQBAL_SHIFT)
+#define AVF_ATQH1 0x00006400 /* Reset: EMPR */
+#define AVF_ATQH1_ATQH_SHIFT 0
+#define AVF_ATQH1_ATQH_MASK AVF_MASK(0x3FF, AVF_ATQH1_ATQH_SHIFT)
+#define AVF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define AVF_ATQLEN1_ATQLEN_SHIFT 0
+#define AVF_ATQLEN1_ATQLEN_MASK AVF_MASK(0x3FF, AVF_ATQLEN1_ATQLEN_SHIFT)
+#define AVF_ATQLEN1_ATQVFE_SHIFT 28
+#define AVF_ATQLEN1_ATQVFE_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQVFE_SHIFT)
+#define AVF_ATQLEN1_ATQOVFL_SHIFT 29
+#define AVF_ATQLEN1_ATQOVFL_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQOVFL_SHIFT)
+#define AVF_ATQLEN1_ATQCRIT_SHIFT 30
+#define AVF_ATQLEN1_ATQCRIT_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQCRIT_SHIFT)
+#define AVF_ATQLEN1_ATQENABLE_SHIFT 31
+#define AVF_ATQLEN1_ATQENABLE_MASK AVF_MASK(0x1, AVF_ATQLEN1_ATQENABLE_SHIFT)
+#define AVF_ATQT1 0x00008400 /* Reset: EMPR */
+#define AVF_ATQT1_ATQT_SHIFT 0
+#define AVF_ATQT1_ATQT_MASK AVF_MASK(0x3FF, AVF_ATQT1_ATQT_SHIFT)
+#define AVFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define AVFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define AVFGEN_RSTAT_VFR_STATE_MASK AVF_MASK(0x3, AVFGEN_RSTAT_VFR_STATE_SHIFT)
+#define AVFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define AVFINT_DYN_CTL01_INTENA_SHIFT 0
+#define AVFINT_DYN_CTL01_INTENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_INTENA_SHIFT)
+#define AVFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define AVFINT_DYN_CTL01_CLEARPBA_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define AVFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define AVFINT_DYN_CTL01_SWINT_TRIG_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define AVFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define AVFINT_DYN_CTL01_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define AVFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define AVFINT_DYN_CTL01_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define AVFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define AVFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define AVFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define AVFINT_DYN_CTL01_SW_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define AVFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define AVFINT_DYN_CTL01_INTENA_MSK_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define AVFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _INTVF=0...15 */ /* Reset: VFR */
+#define AVFINT_DYN_CTLN1_MAX_INDEX 15
+#define AVFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define AVFINT_DYN_CTLN1_INTENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_INTENA_SHIFT)
+#define AVFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define AVFINT_DYN_CTLN1_CLEARPBA_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define AVFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define AVFINT_DYN_CTLN1_SWINT_TRIG_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define AVFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define AVFINT_DYN_CTLN1_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define AVFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define AVFINT_DYN_CTLN1_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define AVFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define AVFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define AVFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define AVFINT_DYN_CTLN1_SW_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define AVFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define AVFINT_DYN_CTLN1_INTENA_MSK_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define AVFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define AVFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define AVFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK AVF_MASK(0x1, AVFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define AVFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define AVFINT_ICR0_ENA1_ADMINQ_MASK AVF_MASK(0x1, AVFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define AVFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define AVFINT_ICR0_ENA1_RSVD_MASK AVF_MASK(0x1, AVFINT_ICR0_ENA1_RSVD_SHIFT)
+#define AVFINT_ICR01 0x00004800 /* Reset: CORER */
+#define AVFINT_ICR01_INTEVENT_SHIFT 0
+#define AVFINT_ICR01_INTEVENT_MASK AVF_MASK(0x1, AVFINT_ICR01_INTEVENT_SHIFT)
+#define AVFINT_ICR01_QUEUE_0_SHIFT 1
+#define AVFINT_ICR01_QUEUE_0_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_0_SHIFT)
+#define AVFINT_ICR01_QUEUE_1_SHIFT 2
+#define AVFINT_ICR01_QUEUE_1_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_1_SHIFT)
+#define AVFINT_ICR01_QUEUE_2_SHIFT 3
+#define AVFINT_ICR01_QUEUE_2_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_2_SHIFT)
+#define AVFINT_ICR01_QUEUE_3_SHIFT 4
+#define AVFINT_ICR01_QUEUE_3_MASK AVF_MASK(0x1, AVFINT_ICR01_QUEUE_3_SHIFT)
+#define AVFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define AVFINT_ICR01_LINK_STAT_CHANGE_MASK AVF_MASK(0x1, AVFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define AVFINT_ICR01_ADMINQ_SHIFT 30
+#define AVFINT_ICR01_ADMINQ_MASK AVF_MASK(0x1, AVFINT_ICR01_ADMINQ_SHIFT)
+#define AVFINT_ICR01_SWINT_SHIFT 31
+#define AVFINT_ICR01_SWINT_MASK AVF_MASK(0x1, AVFINT_ICR01_SWINT_SHIFT)
+#define AVFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define AVFINT_ITR01_MAX_INDEX 2
+#define AVFINT_ITR01_INTERVAL_SHIFT 0
+#define AVFINT_ITR01_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_ITR01_INTERVAL_SHIFT)
+#define AVFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define AVFINT_ITRN1_MAX_INDEX 2
+#define AVFINT_ITRN1_INTERVAL_SHIFT 0
+#define AVFINT_ITRN1_INTERVAL_MASK AVF_MASK(0xFFF, AVFINT_ITRN1_INTERVAL_SHIFT)
+#define AVFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
+#define AVFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define AVFINT_STAT_CTL01_OTHER_ITR_INDX_MASK AVF_MASK(0x3, AVFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define AVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: CORER */
+#define AVF_QRX_TAIL1_MAX_INDEX 15
+#define AVF_QRX_TAIL1_TAIL_SHIFT 0
+#define AVF_QRX_TAIL1_TAIL_MASK AVF_MASK(0x1FFF, AVF_QRX_TAIL1_TAIL_SHIFT)
+#define AVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: PFR */
+#define AVF_QTX_TAIL1_MAX_INDEX 15
+#define AVF_QTX_TAIL1_TAIL_SHIFT 0
+#define AVF_QTX_TAIL1_TAIL_MASK AVF_MASK(0x1FFF, AVF_QTX_TAIL1_TAIL_SHIFT)
+#define AVFMSIX_PBA 0x00002000 /* Reset: VFLR */
+#define AVFMSIX_PBA_PENBIT_SHIFT 0
+#define AVFMSIX_PBA_PENBIT_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_PBA_PENBIT_SHIFT)
+#define AVFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define AVFMSIX_TADD_MAX_INDEX 16
+#define AVFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define AVFMSIX_TADD_MSIXTADD10_MASK AVF_MASK(0x3, AVFMSIX_TADD_MSIXTADD10_SHIFT)
+#define AVFMSIX_TADD_MSIXTADD_SHIFT 2
+#define AVFMSIX_TADD_MSIXTADD_MASK AVF_MASK(0x3FFFFFFF, AVFMSIX_TADD_MSIXTADD_SHIFT)
+#define AVFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define AVFMSIX_TMSG_MAX_INDEX 16
+#define AVFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define AVFMSIX_TMSG_MSIXTMSG_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define AVFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define AVFMSIX_TUADD_MAX_INDEX 16
+#define AVFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define AVFMSIX_TUADD_MSIXTUADD_MASK AVF_MASK(0xFFFFFFFF, AVFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define AVFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define AVFMSIX_TVCTRL_MAX_INDEX 16
+#define AVFMSIX_TVCTRL_MASK_SHIFT 0
+#define AVFMSIX_TVCTRL_MASK_MASK AVF_MASK(0x1, AVFMSIX_TVCTRL_MASK_SHIFT)
+#define AVFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
+#define AVFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define AVFCM_PE_ERRDATA_ERROR_CODE_MASK AVF_MASK(0xF, AVFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define AVFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define AVFCM_PE_ERRDATA_Q_TYPE_MASK AVF_MASK(0x7, AVFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define AVFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define AVFCM_PE_ERRDATA_Q_NUM_MASK AVF_MASK(0x3FFFF, AVFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define AVFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
+#define AVFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define AVFCM_PE_ERRINFO_ERROR_VALID_MASK AVF_MASK(0x1, AVFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define AVFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define AVFCM_PE_ERRINFO_ERROR_INST_MASK AVF_MASK(0x7, AVFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define AVFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define AVFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK AVF_MASK(0xFF, AVFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define AVFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define AVFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK AVF_MASK(0xFF, AVFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define AVFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define AVFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK AVF_MASK(0xFF, AVFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define AVFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define AVFQF_HENA_MAX_INDEX 1
+#define AVFQF_HENA_PTYPE_ENA_SHIFT 0
+#define AVFQF_HENA_PTYPE_ENA_MASK AVF_MASK(0xFFFFFFFF, AVFQF_HENA_PTYPE_ENA_SHIFT)
+#define AVFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define AVFQF_HKEY_MAX_INDEX 12
+#define AVFQF_HKEY_KEY_0_SHIFT 0
+#define AVFQF_HKEY_KEY_0_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_0_SHIFT)
+#define AVFQF_HKEY_KEY_1_SHIFT 8
+#define AVFQF_HKEY_KEY_1_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_1_SHIFT)
+#define AVFQF_HKEY_KEY_2_SHIFT 16
+#define AVFQF_HKEY_KEY_2_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_2_SHIFT)
+#define AVFQF_HKEY_KEY_3_SHIFT 24
+#define AVFQF_HKEY_KEY_3_MASK AVF_MASK(0xFF, AVFQF_HKEY_KEY_3_SHIFT)
+#define AVFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define AVFQF_HLUT_MAX_INDEX 15
+#define AVFQF_HLUT_LUT0_SHIFT 0
+#define AVFQF_HLUT_LUT0_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT0_SHIFT)
+#define AVFQF_HLUT_LUT1_SHIFT 8
+#define AVFQF_HLUT_LUT1_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT1_SHIFT)
+#define AVFQF_HLUT_LUT2_SHIFT 16
+#define AVFQF_HLUT_LUT2_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT2_SHIFT)
+#define AVFQF_HLUT_LUT3_SHIFT 24
+#define AVFQF_HLUT_LUT3_MASK AVF_MASK(0xF, AVFQF_HLUT_LUT3_SHIFT)
+#define AVFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define AVFQF_HREGION_MAX_INDEX 7
+#define AVFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define AVFQF_HREGION_OVERRIDE_ENA_0_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define AVFQF_HREGION_REGION_0_SHIFT 1
+#define AVFQF_HREGION_REGION_0_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_0_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define AVFQF_HREGION_OVERRIDE_ENA_1_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define AVFQF_HREGION_REGION_1_SHIFT 5
+#define AVFQF_HREGION_REGION_1_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_1_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define AVFQF_HREGION_OVERRIDE_ENA_2_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define AVFQF_HREGION_REGION_2_SHIFT 9
+#define AVFQF_HREGION_REGION_2_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_2_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define AVFQF_HREGION_OVERRIDE_ENA_3_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define AVFQF_HREGION_REGION_3_SHIFT 13
+#define AVFQF_HREGION_REGION_3_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_3_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define AVFQF_HREGION_OVERRIDE_ENA_4_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define AVFQF_HREGION_REGION_4_SHIFT 17
+#define AVFQF_HREGION_REGION_4_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_4_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define AVFQF_HREGION_OVERRIDE_ENA_5_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define AVFQF_HREGION_REGION_5_SHIFT 21
+#define AVFQF_HREGION_REGION_5_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_5_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define AVFQF_HREGION_OVERRIDE_ENA_6_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define AVFQF_HREGION_REGION_6_SHIFT 25
+#define AVFQF_HREGION_REGION_6_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_6_SHIFT)
+#define AVFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define AVFQF_HREGION_OVERRIDE_ENA_7_MASK AVF_MASK(0x1, AVFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define AVFQF_HREGION_REGION_7_SHIFT 29
+#define AVFQF_HREGION_REGION_7_MASK AVF_MASK(0x7, AVFQF_HREGION_REGION_7_SHIFT)
+
+#define AVFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define AVFINT_DYN_CTL01_WB_ON_ITR_MASK AVF_MASK(0x1, AVFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define AVFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define AVFINT_DYN_CTLN1_WB_ON_ITR_MASK AVF_MASK(0x1, AVFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define AVFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define AVFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define AVFPE_AEQALLOC1_AECOUNT_MASK AVF_MASK(0xFFFFFFFF, AVFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define AVFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define AVFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define AVFPE_CCQPHIGH1_PECCQPHIGH_MASK AVF_MASK(0xFFFFFFFF, AVFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define AVFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define AVFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define AVFPE_CCQPLOW1_PECCQPLOW_MASK AVF_MASK(0xFFFFFFFF, AVFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define AVFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define AVFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define AVFPE_CCQPSTATUS1_CCQP_DONE_MASK AVF_MASK(0x1, AVFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define AVFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define AVFPE_CCQPSTATUS1_HMC_PROFILE_MASK AVF_MASK(0x7, AVFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define AVFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define AVFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK AVF_MASK(0x3F, AVFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define AVFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define AVFPE_CCQPSTATUS1_CCQP_ERR_MASK AVF_MASK(0x1, AVFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define AVFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define AVFPE_CQACK1_PECQID_SHIFT 0
+#define AVFPE_CQACK1_PECQID_MASK AVF_MASK(0x1FFFF, AVFPE_CQACK1_PECQID_SHIFT)
+#define AVFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define AVFPE_CQARM1_PECQID_SHIFT 0
+#define AVFPE_CQARM1_PECQID_MASK AVF_MASK(0x1FFFF, AVFPE_CQARM1_PECQID_SHIFT)
+#define AVFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define AVFPE_CQPDB1_WQHEAD_SHIFT 0
+#define AVFPE_CQPDB1_WQHEAD_MASK AVF_MASK(0x7FF, AVFPE_CQPDB1_WQHEAD_SHIFT)
+#define AVFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define AVFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define AVFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK AVF_MASK(0xFFFF, AVFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define AVFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define AVFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK AVF_MASK(0xFFFF, AVFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define AVFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define AVFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define AVFPE_CQPTAIL1_WQTAIL_MASK AVF_MASK(0x7FF, AVFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define AVFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define AVFPE_CQPTAIL1_CQP_OP_ERR_MASK AVF_MASK(0x1, AVFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define AVFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define AVFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define AVFPE_IPCONFIG01_PEIPID_MASK AVF_MASK(0xFFFF, AVFPE_IPCONFIG01_PEIPID_SHIFT)
+#define AVFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define AVFPE_IPCONFIG01_USEENTIREIDRANGE_MASK AVF_MASK(0x1, AVFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define AVFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define AVFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define AVFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK AVF_MASK(0x1F, AVFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define AVFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define AVFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define AVFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK AVF_MASK(0xFFFFFF, AVFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define AVFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define AVFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define AVFPE_TCPNOWTIMER1_TCP_NOW_MASK AVF_MASK(0xFFFFFFFF, AVFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define AVFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define AVFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define AVFPE_WQEALLOC1_PEQPID_MASK AVF_MASK(0x3FFFF, AVFPE_WQEALLOC1_PEQPID_SHIFT)
+#define AVFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define AVFPE_WQEALLOC1_WQE_DESC_INDEX_MASK AVF_MASK(0xFFF, AVFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
+#endif /* _AVF_REGISTER_H_ */
diff --git a/drivers/net/avf/base/avf_status.h b/drivers/net/avf/base/avf_status.h
new file mode 100644
index 00000000..e8a673bd
--- /dev/null
+++ b/drivers/net/avf/base/avf_status.h
@@ -0,0 +1,108 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_STATUS_H_
+#define _AVF_STATUS_H_
+
+/* Error Codes */
+enum avf_status_code {
+ AVF_SUCCESS = 0,
+ AVF_ERR_NVM = -1,
+ AVF_ERR_NVM_CHECKSUM = -2,
+ AVF_ERR_PHY = -3,
+ AVF_ERR_CONFIG = -4,
+ AVF_ERR_PARAM = -5,
+ AVF_ERR_MAC_TYPE = -6,
+ AVF_ERR_UNKNOWN_PHY = -7,
+ AVF_ERR_LINK_SETUP = -8,
+ AVF_ERR_ADAPTER_STOPPED = -9,
+ AVF_ERR_INVALID_MAC_ADDR = -10,
+ AVF_ERR_DEVICE_NOT_SUPPORTED = -11,
+ AVF_ERR_MASTER_REQUESTS_PENDING = -12,
+ AVF_ERR_INVALID_LINK_SETTINGS = -13,
+ AVF_ERR_AUTONEG_NOT_COMPLETE = -14,
+ AVF_ERR_RESET_FAILED = -15,
+ AVF_ERR_SWFW_SYNC = -16,
+ AVF_ERR_NO_AVAILABLE_VSI = -17,
+ AVF_ERR_NO_MEMORY = -18,
+ AVF_ERR_BAD_PTR = -19,
+ AVF_ERR_RING_FULL = -20,
+ AVF_ERR_INVALID_PD_ID = -21,
+ AVF_ERR_INVALID_QP_ID = -22,
+ AVF_ERR_INVALID_CQ_ID = -23,
+ AVF_ERR_INVALID_CEQ_ID = -24,
+ AVF_ERR_INVALID_AEQ_ID = -25,
+ AVF_ERR_INVALID_SIZE = -26,
+ AVF_ERR_INVALID_ARP_INDEX = -27,
+ AVF_ERR_INVALID_FPM_FUNC_ID = -28,
+ AVF_ERR_QP_INVALID_MSG_SIZE = -29,
+ AVF_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ AVF_ERR_INVALID_FRAG_COUNT = -31,
+ AVF_ERR_QUEUE_EMPTY = -32,
+ AVF_ERR_INVALID_ALIGNMENT = -33,
+ AVF_ERR_FLUSHED_QUEUE = -34,
+ AVF_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ AVF_ERR_INVALID_IMM_DATA_SIZE = -36,
+ AVF_ERR_TIMEOUT = -37,
+ AVF_ERR_OPCODE_MISMATCH = -38,
+ AVF_ERR_CQP_COMPL_ERROR = -39,
+ AVF_ERR_INVALID_VF_ID = -40,
+ AVF_ERR_INVALID_HMCFN_ID = -41,
+ AVF_ERR_BACKING_PAGE_ERROR = -42,
+ AVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ AVF_ERR_INVALID_PBLE_INDEX = -44,
+ AVF_ERR_INVALID_SD_INDEX = -45,
+ AVF_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ AVF_ERR_INVALID_SD_TYPE = -47,
+ AVF_ERR_MEMCPY_FAILED = -48,
+ AVF_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ AVF_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ AVF_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ AVF_ERR_SRQ_ENABLED = -52,
+ AVF_ERR_ADMIN_QUEUE_ERROR = -53,
+ AVF_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ AVF_ERR_BUF_TOO_SHORT = -55,
+ AVF_ERR_ADMIN_QUEUE_FULL = -56,
+ AVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ AVF_ERR_BAD_IWARP_CQE = -58,
+ AVF_ERR_NVM_BLANK_MODE = -59,
+ AVF_ERR_NOT_IMPLEMENTED = -60,
+ AVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ AVF_ERR_DIAG_TEST_FAILED = -62,
+ AVF_ERR_NOT_READY = -63,
+ AVF_NOT_SUPPORTED = -64,
+ AVF_ERR_FIRMWARE_API_VERSION = -65,
+ AVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
+};
+
+#endif /* _AVF_STATUS_H_ */
diff --git a/drivers/net/avf/base/avf_type.h b/drivers/net/avf/base/avf_type.h
new file mode 100644
index 00000000..546c6d2a
--- /dev/null
+++ b/drivers/net/avf/base/avf_type.h
@@ -0,0 +1,2024 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _AVF_TYPE_H_
+#define _AVF_TYPE_H_
+
+#include "avf_status.h"
+#include "avf_osdep.h"
+#include "avf_register.h"
+#include "avf_adminq.h"
+#include "avf_hmc.h"
+#include "avf_lan_hmc.h"
+#include "avf_devids.h"
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p) (_p);
+#define UNREFERENCED_2PARAMETER(_p, _q) (_p); (_q);
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) (_p); (_q); (_r);
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) (_p); (_q); (_r); (_s);
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) (_p); (_q); (_r); (_s); (_t);
+
+#ifndef LINUX_MACROS
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+#endif /* LINUX_MACROS */
+
+#ifndef AVF_MASK
+/* AVF_MASK is a macro used on 32 bit registers */
+#define AVF_MASK(mask, shift) ((mask) << (shift))
+#endif
+
+#define AVF_MAX_PF 16
+#define AVF_MAX_PF_VSI 64
+#define AVF_MAX_PF_QP 128
+#define AVF_MAX_VSI_QP 16
+#define AVF_MAX_VF_VSI 3
+#define AVF_MAX_CHAINED_RX_BUFFERS 5
+#define AVF_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* something less than 1 minute */
+#define AVF_HEARTBEAT_TIMEOUT (HZ * 50)
+
+/* Max default timeout in ms */
+#define AVF_MAX_NVM_TIMEOUT 18000
+
+/* Max timeout in ms for the phy to respond */
+#define AVF_MAX_PHY_TIMEOUT 500
+
+/* Check whether an address is multicast. */
+#define AVF_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define AVF_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
+
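
These helpers operate directly on the raw byte array of a MAC address: multicast is detected from the I/G bit (bit 0 of byte 0), while the broadcast check only inspects the first two bytes, relying on the caller to pass a real Ethernet address. A minimal standalone sketch of how they evaluate, using fabricated addresses (not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

#define AVF_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
#define AVF_IS_BROADCAST(address) \
	((((u8 *)(address))[0] == ((u8)0xff)) && \
	 (((u8 *)(address))[1] == ((u8)0xff)))

int main(void)
{
	u8 bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	u8 ucast[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 }; /* fabricated */

	/* A broadcast address is also multicast: its I/G bit is set. */
	printf("%d %d\n", AVF_IS_MULTICAST(bcast), AVF_IS_BROADCAST(bcast)); /* 1 1 */
	printf("%d %d\n", AVF_IS_MULTICAST(ucast), AVF_IS_BROADCAST(ucast)); /* 0 0 */
	return 0;
}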
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define AVF_MS_TO_GTIME(time) ((time) * 1000)
+
+/* forward declaration */
+struct avf_hw;
+typedef void (*AVF_ADMINQ_CALLBACK)(struct avf_hw *, struct avf_aq_desc *);
+
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
+/* Data type manipulation macros. */
+#define AVF_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define AVF_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+
+#define AVF_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define AVF_LO_WORD(x) ((u16)((x) & 0xFFFF))
+
+#define AVF_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define AVF_LO_BYTE(x) ((u8)((x) & 0xFF))
+
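The data type manipulation macros split wider values into the 32/16/8-bit halves that register pairs expect; the double 16-bit shift in AVF_HI_DWORD presumably keeps the macro well-defined even if it is ever handed a 32-bit argument. A small self-contained sketch (an editorial illustration, not part of the patch) splitting a fabricated 64-bit DMA address the way the admin-queue base-address register pairs such as AVF_ARQBAH1/AVF_ARQBAL1 consume it:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

#define AVF_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
#define AVF_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))

int main(void)
{
	/* Fabricated 64-bit DMA address, split into the high/low dword
	 * pair the hardware registers take. */
	uint64_t dma = 0x0000123487654321ULL;

	printf("hi=0x%08x lo=0x%08x\n",
	       (unsigned)AVF_HI_DWORD(dma),
	       (unsigned)AVF_LO_DWORD(dma)); /* hi=0x00001234 lo=0x87654321 */
	return 0;
}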
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define AVF_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define AVF_REQ_RX_DESCRIPTOR_MULTIPLE 32
+
+#define AVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
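AVF_DESC_UNUSED() computes how many descriptors software may still post: the ring keeps one slot permanently empty so that next_to_use == next_to_clean unambiguously means "empty", and the conditional adds the ring size back when the indices have not yet wrapped. A worked example under fabricated ring indices (editorial sketch, not part of the patch):

#include <stdio.h>

/* Minimal stand-in for the ring fields AVF_DESC_UNUSED() touches;
 * a real driver ring has many more members. */
struct ring {
	unsigned int count;
	unsigned int next_to_use;
	unsigned int next_to_clean;
};

#define AVF_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)

int main(void)
{
	/* 512-entry ring: SW has filled up to index 10, HW has cleaned up
	 * to index 4, so slots 4..9 are in flight and one slot stays empty
	 * to distinguish full from empty. */
	struct ring r = { .count = 512, .next_to_use = 10, .next_to_clean = 4 };

	printf("%u\n", (unsigned)AVF_DESC_UNUSED(&r)); /* 512 + 4 - 10 - 1 = 505 */
	return 0;
}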
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define AVF_QTX_CTL_VF_QUEUE 0x0
+#define AVF_QTX_CTL_VM_QUEUE 0x1
+#define AVF_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum avf_debug_mask {
+ AVF_DEBUG_INIT = 0x00000001,
+ AVF_DEBUG_RELEASE = 0x00000002,
+
+ AVF_DEBUG_LINK = 0x00000010,
+ AVF_DEBUG_PHY = 0x00000020,
+ AVF_DEBUG_HMC = 0x00000040,
+ AVF_DEBUG_NVM = 0x00000080,
+ AVF_DEBUG_LAN = 0x00000100,
+ AVF_DEBUG_FLOW = 0x00000200,
+ AVF_DEBUG_DCB = 0x00000400,
+ AVF_DEBUG_DIAG = 0x00000800,
+ AVF_DEBUG_FD = 0x00001000,
+ AVF_DEBUG_PACKAGE = 0x00002000,
+
+ AVF_DEBUG_AQ_MESSAGE = 0x01000000,
+ AVF_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ AVF_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ AVF_DEBUG_AQ_COMMAND = 0x06000000,
+ AVF_DEBUG_AQ = 0x0F000000,
+
+ AVF_DEBUG_USER = 0xF0000000,
+
+ AVF_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define AVF_PCI_LINK_STATUS 0xB2
+#define AVF_PCI_LINK_WIDTH 0x3F0
+#define AVF_PCI_LINK_WIDTH_1 0x10
+#define AVF_PCI_LINK_WIDTH_2 0x20
+#define AVF_PCI_LINK_WIDTH_4 0x40
+#define AVF_PCI_LINK_WIDTH_8 0x80
+#define AVF_PCI_LINK_SPEED 0xF
+#define AVF_PCI_LINK_SPEED_2500 0x1
+#define AVF_PCI_LINK_SPEED_5000 0x2
+#define AVF_PCI_LINK_SPEED_8000 0x3
+
+#define AVF_MDIO_CLAUSE22_STCODE_MASK AVF_MASK(1, \
+ AVF_GLGEN_MSCA_STCODE_SHIFT)
+#define AVF_MDIO_CLAUSE22_OPCODE_WRITE_MASK AVF_MASK(1, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define AVF_MDIO_CLAUSE22_OPCODE_READ_MASK AVF_MASK(2, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define AVF_MDIO_CLAUSE45_STCODE_MASK AVF_MASK(0, \
+ AVF_GLGEN_MSCA_STCODE_SHIFT)
+#define AVF_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK AVF_MASK(0, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define AVF_MDIO_CLAUSE45_OPCODE_WRITE_MASK AVF_MASK(1, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define AVF_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK AVF_MASK(2, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+#define AVF_MDIO_CLAUSE45_OPCODE_READ_MASK AVF_MASK(3, \
+ AVF_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define AVF_PHY_COM_REG_PAGE 0x1E
+#define AVF_PHY_LED_LINK_MODE_MASK 0xF0
+#define AVF_PHY_LED_MANUAL_ON 0x100
+#define AVF_PHY_LED_PROV_REG_1 0xC430
+#define AVF_PHY_LED_MODE_MASK 0xFFFF
+#define AVF_PHY_LED_MODE_ORIG 0x80000000
+
+/* Memory types */
+enum avf_memset_type {
+ AVF_NONDMA_MEM = 0,
+ AVF_DMA_MEM
+};
+
+/* Memcpy types */
+enum avf_memcpy_type {
+ AVF_NONDMA_TO_NONDMA = 0,
+ AVF_NONDMA_TO_DMA,
+ AVF_DMA_TO_DMA,
+ AVF_DMA_TO_NONDMA
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum avf_mac_type {
+ AVF_MAC_UNKNOWN = 0,
+ AVF_MAC_XL710,
+ AVF_MAC_VF,
+ AVF_MAC_X722,
+ AVF_MAC_X722_VF,
+ AVF_MAC_GENERIC,
+};
+
+enum avf_media_type {
+ AVF_MEDIA_TYPE_UNKNOWN = 0,
+ AVF_MEDIA_TYPE_FIBER,
+ AVF_MEDIA_TYPE_BASET,
+ AVF_MEDIA_TYPE_BACKPLANE,
+ AVF_MEDIA_TYPE_CX4,
+ AVF_MEDIA_TYPE_DA,
+ AVF_MEDIA_TYPE_VIRTUAL
+};
+
+enum avf_fc_mode {
+ AVF_FC_NONE = 0,
+ AVF_FC_RX_PAUSE,
+ AVF_FC_TX_PAUSE,
+ AVF_FC_FULL,
+ AVF_FC_PFC,
+ AVF_FC_DEFAULT
+};
+
+enum avf_set_fc_aq_failures {
+ AVF_SET_FC_AQ_FAIL_NONE = 0,
+ AVF_SET_FC_AQ_FAIL_GET = 1,
+ AVF_SET_FC_AQ_FAIL_SET = 2,
+ AVF_SET_FC_AQ_FAIL_UPDATE = 4,
+ AVF_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
+enum avf_vsi_type {
+ AVF_VSI_MAIN = 0,
+ AVF_VSI_VMDQ1 = 1,
+ AVF_VSI_VMDQ2 = 2,
+ AVF_VSI_CTRL = 3,
+ AVF_VSI_FCOE = 4,
+ AVF_VSI_MIRROR = 5,
+ AVF_VSI_SRIOV = 6,
+ AVF_VSI_FDIR = 7,
+ AVF_VSI_TYPE_UNKNOWN
+};
+
+enum avf_queue_type {
+ AVF_QUEUE_TYPE_RX = 0,
+ AVF_QUEUE_TYPE_TX,
+ AVF_QUEUE_TYPE_PE_CEQ,
+ AVF_QUEUE_TYPE_UNKNOWN
+};
+
+struct avf_link_status {
+ enum avf_aq_phy_type phy_type;
+ enum avf_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 req_fec_info;
+ u8 fec_info;
+ u8 ext_info;
+ u8 loopback;
+ /* whether Link Status Event notification to SW is enabled */
+ bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
+ u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define AVF_MODULE_TYPE_SFP 0x03
+#define AVF_MODULE_TYPE_QSFP 0x0D
+ /* 2nd byte: ethernet compliance codes for 10/40G */
+#define AVF_MODULE_TYPE_40G_ACTIVE 0x01
+#define AVF_MODULE_TYPE_40G_LR4 0x02
+#define AVF_MODULE_TYPE_40G_SR4 0x04
+#define AVF_MODULE_TYPE_40G_CR4 0x08
+#define AVF_MODULE_TYPE_10G_BASE_SR 0x10
+#define AVF_MODULE_TYPE_10G_BASE_LR 0x20
+#define AVF_MODULE_TYPE_10G_BASE_LRM 0x40
+#define AVF_MODULE_TYPE_10G_BASE_ER 0x80
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define AVF_MODULE_TYPE_1000BASE_SX 0x01
+#define AVF_MODULE_TYPE_1000BASE_LX 0x02
+#define AVF_MODULE_TYPE_1000BASE_CX 0x04
+#define AVF_MODULE_TYPE_1000BASE_T 0x08
+};
+
+struct avf_phy_info {
+ struct avf_link_status link_info;
+ struct avf_link_status link_info_old;
+ bool get_link_info;
+ enum avf_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ u64 phy_types;
+};
+
+#define AVF_CAP_PHY_TYPE_SGMII BIT_ULL(AVF_PHY_TYPE_SGMII)
+#define AVF_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(AVF_PHY_TYPE_1000BASE_KX)
+#define AVF_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(AVF_PHY_TYPE_10GBASE_KX4)
+#define AVF_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(AVF_PHY_TYPE_10GBASE_KR)
+#define AVF_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(AVF_PHY_TYPE_40GBASE_KR4)
+#define AVF_CAP_PHY_TYPE_XAUI BIT_ULL(AVF_PHY_TYPE_XAUI)
+#define AVF_CAP_PHY_TYPE_XFI BIT_ULL(AVF_PHY_TYPE_XFI)
+#define AVF_CAP_PHY_TYPE_SFI BIT_ULL(AVF_PHY_TYPE_SFI)
+#define AVF_CAP_PHY_TYPE_XLAUI BIT_ULL(AVF_PHY_TYPE_XLAUI)
+#define AVF_CAP_PHY_TYPE_XLPPI BIT_ULL(AVF_PHY_TYPE_XLPPI)
+#define AVF_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(AVF_PHY_TYPE_40GBASE_CR4_CU)
+#define AVF_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(AVF_PHY_TYPE_10GBASE_CR1_CU)
+#define AVF_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(AVF_PHY_TYPE_10GBASE_AOC)
+#define AVF_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(AVF_PHY_TYPE_40GBASE_AOC)
+#define AVF_CAP_PHY_TYPE_100BASE_TX BIT_ULL(AVF_PHY_TYPE_100BASE_TX)
+#define AVF_CAP_PHY_TYPE_1000BASE_T BIT_ULL(AVF_PHY_TYPE_1000BASE_T)
+#define AVF_CAP_PHY_TYPE_10GBASE_T BIT_ULL(AVF_PHY_TYPE_10GBASE_T)
+#define AVF_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(AVF_PHY_TYPE_10GBASE_SR)
+#define AVF_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(AVF_PHY_TYPE_10GBASE_LR)
+#define AVF_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(AVF_PHY_TYPE_10GBASE_SFPP_CU)
+#define AVF_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(AVF_PHY_TYPE_10GBASE_CR1)
+#define AVF_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(AVF_PHY_TYPE_40GBASE_CR4)
+#define AVF_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(AVF_PHY_TYPE_40GBASE_SR4)
+#define AVF_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(AVF_PHY_TYPE_40GBASE_LR4)
+#define AVF_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(AVF_PHY_TYPE_1000BASE_SX)
+#define AVF_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(AVF_PHY_TYPE_1000BASE_LX)
+#define AVF_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(AVF_PHY_TYPE_1000BASE_T_OPTICAL)
+#define AVF_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(AVF_PHY_TYPE_20GBASE_KR2)
+/*
+ * The macro AVF_PHY_TYPE_OFFSET implements a bit shift for some PHY
+ * types. Bit 31 is unused in the AVF_CAP_PHY_TYPE_* bit fields, but
+ * there is no corresponding gap in the avf_aq_phy_type enumeration, so
+ * a shift is needed to compensate for enumeration values larger than
+ * 31. The only affected values are AVF_PHY_TYPE_25GBASE_*.
+ */
+#define AVF_PHY_TYPE_OFFSET 1
+#define AVF_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(AVF_PHY_TYPE_25GBASE_KR + \
+ AVF_PHY_TYPE_OFFSET)
+#define AVF_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(AVF_PHY_TYPE_25GBASE_CR + \
+ AVF_PHY_TYPE_OFFSET)
+#define AVF_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(AVF_PHY_TYPE_25GBASE_SR + \
+ AVF_PHY_TYPE_OFFSET)
+#define AVF_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(AVF_PHY_TYPE_25GBASE_LR + \
+ AVF_PHY_TYPE_OFFSET)
+#define AVF_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(AVF_PHY_TYPE_25GBASE_AOC + \
+ AVF_PHY_TYPE_OFFSET)
+#define AVF_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(AVF_PHY_TYPE_25GBASE_ACC + \
+ AVF_PHY_TYPE_OFFSET)
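
The offset exists because the capability word reserves bit 31 while the enumeration does not. A hedged illustration of the arithmetic (the avf_aq_phy_type values live in the companion adminq header, not in this hunk; AVF_PHY_TYPE_25GBASE_KR = 31 is assumed here purely for the example):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(a) (1ULL << (a))
#define AVF_PHY_TYPE_OFFSET 1

/* Assumed enum value (see lead-in): without the offset this would
 * collide with the unused capability bit 31. */
#define AVF_PHY_TYPE_25GBASE_KR 31

#define AVF_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(AVF_PHY_TYPE_25GBASE_KR + \
					    AVF_PHY_TYPE_OFFSET)

int main(void)
{
	/* The capability bit lands at position 32, one past the enum value. */
	printf("0x%016llx\n",
	       (unsigned long long)AVF_CAP_PHY_TYPE_25GBASE_KR);
	return 0;
}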
+#define AVF_HW_CAP_MAX_GPIO 30
+#define AVF_HW_CAP_MDIO_PORT_MODE_MDIO 0
+#define AVF_HW_CAP_MDIO_PORT_MODE_I2C 1
+
+enum avf_acpi_programming_method {
+ AVF_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+ AVF_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define AVF_WOL_SUPPORT_MASK 0x1
+#define AVF_ACPI_PROGRAMMING_METHOD_MASK 0x2
+#define AVF_PROXY_SUPPORT_MASK 0x4
+
+/* Capabilities of a PF or a VF or the whole device */
+struct avf_hw_capabilities {
+ u32 switch_mode;
+#define AVF_NVM_IMAGE_TYPE_EVB 0x0
+#define AVF_NVM_IMAGE_TYPE_CLOUD 0x2
+#define AVF_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define AVF_MNG_PROTOCOL_PLDM 0x2
+#define AVF_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define AVF_MNG_PROTOCOL_NCSI 0x8
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool iscsi; /* Indicates iSCSI enabled */
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define AVF_FLEX10_MODE_UNKNOWN 0x0
+#define AVF_FLEX10_MODE_DCC 0x1
+#define AVF_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define AVF_FLEX10_STATUS_DCC_ERROR 0x1
+#define AVF_FLEX10_STATUS_VC_MODE 0x2
+
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define AVF_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define AVF_NVM_MGMT_UPDATE_DISABLED 0x2
+
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[AVF_HW_CAP_MAX_GPIO];
+ bool sdp[AVF_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+ u64 wr_csr_prot;
+ bool apm_wol_support;
+ enum avf_acpi_programming_method acpi_prog_method;
+ bool proxy_support;
+};
+
+struct avf_mac_info {
+ enum avf_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+enum avf_aq_resources_ids {
+ AVF_NVM_RESOURCE_ID = 1
+};
+
+enum avf_aq_resource_access_type {
+ AVF_RESOURCE_READ = 1,
+ AVF_RESOURCE_WRITE
+};
+
+struct avf_nvm_info {
+ u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present) */
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
+};
+
+/* definitions used in NVM update support */
+
+enum avf_nvmupd_cmd {
+ AVF_NVMUPD_INVALID,
+ AVF_NVMUPD_READ_CON,
+ AVF_NVMUPD_READ_SNT,
+ AVF_NVMUPD_READ_LCB,
+ AVF_NVMUPD_READ_SA,
+ AVF_NVMUPD_WRITE_ERA,
+ AVF_NVMUPD_WRITE_CON,
+ AVF_NVMUPD_WRITE_SNT,
+ AVF_NVMUPD_WRITE_LCB,
+ AVF_NVMUPD_WRITE_SA,
+ AVF_NVMUPD_CSUM_CON,
+ AVF_NVMUPD_CSUM_SA,
+ AVF_NVMUPD_CSUM_LCB,
+ AVF_NVMUPD_STATUS,
+ AVF_NVMUPD_EXEC_AQ,
+ AVF_NVMUPD_GET_AQ_RESULT,
+ AVF_NVMUPD_GET_AQ_EVENT,
+};
+
+enum avf_nvmupd_state {
+ AVF_NVMUPD_STATE_INIT,
+ AVF_NVMUPD_STATE_READING,
+ AVF_NVMUPD_STATE_WRITING,
+ AVF_NVMUPD_STATE_INIT_WAIT,
+ AVF_NVMUPD_STATE_WRITE_WAIT,
+ AVF_NVMUPD_STATE_ERROR
+};
+
+/* The nvm_access definition and its masks/shifts need to be accessible
+ * to the application, the core driver, and the shared code; which file
+ * should host them is still an open question.
+ */
+#define AVF_NVM_READ 0xB
+#define AVF_NVM_WRITE 0xC
+
+#define AVF_NVM_MOD_PNT_MASK 0xFF
+
+#define AVF_NVM_TRANS_SHIFT 8
+#define AVF_NVM_TRANS_MASK (0xf << AVF_NVM_TRANS_SHIFT)
+#define AVF_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define AVF_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << AVF_NVM_PRESERVATION_FLAGS_SHIFT)
+#define AVF_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define AVF_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define AVF_NVM_CON 0x0
+#define AVF_NVM_SNT 0x1
+#define AVF_NVM_LCB 0x2
+#define AVF_NVM_SA (AVF_NVM_SNT | AVF_NVM_LCB)
+#define AVF_NVM_ERA 0x4
+#define AVF_NVM_CSUM 0x8
+#define AVF_NVM_AQE 0xe
+#define AVF_NVM_EXEC 0xf
+
+#define AVF_NVM_ADAPT_SHIFT 16
+#define AVF_NVM_ADAPT_MASK (0xffffULL << AVF_NVM_ADAPT_SHIFT)
+
+#define AVF_NVMUPD_MAX_DATA 4096
+#define AVF_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct avf_nvm_access {
+ u32 command;
+ u32 config;
+ u32 offset; /* in bytes */
+ u32 data_size; /* in bytes */
+ u8 data[1];
+};
+
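The config word of struct avf_nvm_access packs the module pointer in the low byte, the transaction type in bits 8-11 and the preservation flags in bits 12-13. A minimal decoding sketch with a fabricated config value (the 0x0E module pointer is invented for illustration; not part of the patch):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

#define AVF_NVM_MOD_PNT_MASK			0xFF
#define AVF_NVM_TRANS_SHIFT			8
#define AVF_NVM_TRANS_MASK			(0xf << AVF_NVM_TRANS_SHIFT)
#define AVF_NVM_PRESERVATION_FLAGS_SHIFT	12
#define AVF_NVM_PRESERVATION_FLAGS_MASK \
				(0x3 << AVF_NVM_PRESERVATION_FLAGS_SHIFT)
#define AVF_NVM_SA				0x3	/* SNT | LCB */

int main(void)
{
	/* Hypothetical config word: module pointer 0x0E, single-shot
	 * transaction (SA), "preserve selected" flag. */
	u32 config = 0x0E | (AVF_NVM_SA << AVF_NVM_TRANS_SHIFT) |
		     (0x01 << AVF_NVM_PRESERVATION_FLAGS_SHIFT);

	printf("module %u trans %u preserve %u\n",
	       (unsigned)(config & AVF_NVM_MOD_PNT_MASK),
	       (unsigned)((config & AVF_NVM_TRANS_MASK) >>
			  AVF_NVM_TRANS_SHIFT),
	       (unsigned)((config & AVF_NVM_PRESERVATION_FLAGS_MASK) >>
			  AVF_NVM_PRESERVATION_FLAGS_SHIFT));
	return 0;	/* prints: module 14 trans 3 preserve 1 */
}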
+/* (Q)SFP module access definitions */
+#define AVF_I2C_EEPROM_DEV_ADDR 0xA0
+#define AVF_I2C_EEPROM_DEV_ADDR2 0xA2
+#define AVF_MODULE_TYPE_ADDR 0x00
+#define AVF_MODULE_REVISION_ADDR 0x01
+#define AVF_MODULE_SFF_8472_COMP 0x5E
+#define AVF_MODULE_SFF_8472_SWAP 0x5C
+#define AVF_MODULE_SFF_ADDR_MODE 0x04
+#define AVF_MODULE_SFF_DIAG_CAPAB 0x40
+#define AVF_MODULE_TYPE_QSFP_PLUS 0x0D
+#define AVF_MODULE_TYPE_QSFP28 0x11
+#define AVF_MODULE_QSFP_MAX_LEN 640
+
+/* PCI bus types */
+enum avf_bus_type {
+ avf_bus_type_unknown = 0,
+ avf_bus_type_pci,
+ avf_bus_type_pcix,
+ avf_bus_type_pci_express,
+ avf_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum avf_bus_speed {
+ avf_bus_speed_unknown = 0,
+ avf_bus_speed_33 = 33,
+ avf_bus_speed_66 = 66,
+ avf_bus_speed_100 = 100,
+ avf_bus_speed_120 = 120,
+ avf_bus_speed_133 = 133,
+ avf_bus_speed_2500 = 2500,
+ avf_bus_speed_5000 = 5000,
+ avf_bus_speed_8000 = 8000,
+ avf_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum avf_bus_width {
+ avf_bus_width_unknown = 0,
+ avf_bus_width_pcie_x1 = 1,
+ avf_bus_width_pcie_x2 = 2,
+ avf_bus_width_pcie_x4 = 4,
+ avf_bus_width_pcie_x8 = 8,
+ avf_bus_width_32 = 32,
+ avf_bus_width_64 = 64,
+ avf_bus_width_reserved
+};
+
+/* Bus parameters */
+struct avf_bus_info {
+ enum avf_bus_speed speed;
+ enum avf_bus_width width;
+ enum avf_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+ u16 bus_id;
+};
+
+/* Flow control (FC) parameters */
+struct avf_fc_info {
+ enum avf_fc_mode current_mode; /* FC mode in effect */
+ enum avf_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define AVF_MAX_TRAFFIC_CLASS 8
+#define AVF_MAX_USER_PRIORITY 8
+#define AVF_DCBX_MAX_APPS 32
+#define AVF_LLDPDU_SIZE 1500
+#define AVF_TLV_STATUS_OPER 0x1
+#define AVF_TLV_STATUS_SYNC 0x2
+#define AVF_TLV_STATUS_ERR 0x4
+#define AVF_CEE_OPER_MAX_APPS 3
+#define AVF_APP_PROTOID_FCOE 0x8906
+#define AVF_APP_PROTOID_ISCSI 0x0cbc
+#define AVF_APP_PROTOID_FIP 0x8914
+#define AVF_APP_SEL_ETHTYPE 0x1
+#define AVF_APP_SEL_TCPIP 0x2
+#define AVF_CEE_APP_SEL_ETHTYPE 0x0
+#define AVF_CEE_APP_SEL_TCPIP 0x1
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct avf_dcb_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[AVF_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[AVF_MAX_TRAFFIC_CLASS];
+ u8 tsatable[AVF_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct avf_dcb_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct avf_dcb_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct avf_dcbx_config {
+ u8 dcbx_mode;
+#define AVF_DCBX_MODE_CEE 0x1
+#define AVF_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define AVF_DCBX_APPS_NON_WILLING 0x1
+ u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
+ struct avf_dcb_ets_config etscfg;
+ struct avf_dcb_ets_config etsrec;
+ struct avf_dcb_pfc_config pfc;
+ struct avf_dcb_app_priority_table app[AVF_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct avf_hw {
+ u8 *hw_addr;
+ void *back;
+
+ /* subsystem structs */
+ struct avf_phy_info phy;
+ struct avf_mac_info mac;
+ struct avf_bus_info bus;
+ struct avf_nvm_info nvm;
+ struct avf_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct avf_hw_capabilities dev_caps;
+ struct avf_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct avf_adminq_info aq;
+
+ /* state of nvm update process */
+ enum avf_nvmupd_state nvmupd_state;
+ struct avf_aq_desc nvm_wb_desc;
+ struct avf_aq_desc nvm_aq_event_desc;
+ struct avf_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
+
+ /* HMC info */
+ struct avf_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct avf_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct avf_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct avf_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+ /* WoL and proxy support */
+ u16 num_wol_proxy_filters;
+ u16 wol_proxy_vsi_seid;
+
+#define AVF_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define AVF_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define AVF_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define AVF_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+ u64 flags;
+
+ /* Used in set switch config AQ command */
+ u16 switch_tag;
+ u16 first_tag;
+ u16 second_tag;
+
+ /* debug mask */
+ u32 debug_mask;
+ char err_str[16];
+};
+
+STATIC INLINE bool avf_is_vf(struct avf_hw *hw)
+{
+ return (hw->mac.type == AVF_MAC_VF ||
+ hw->mac.type == AVF_MAC_X722_VF);
+}
+
+struct avf_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union avf_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union avf_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define AVF_RXD_QW0_MIRROR_STATUS_SHIFT 8
+#define AVF_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \
+ AVF_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define AVF_RXD_QW0_FCOEINDX_SHIFT 0
+#define AVF_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \
+ AVF_RXD_QW0_FCOEINDX_SHIFT)
+
+enum avf_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_DESC_STATUS_DD_SHIFT = 0,
+ AVF_RX_DESC_STATUS_EOF_SHIFT = 1,
+ AVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ AVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ AVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ AVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ AVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ AVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+
+ AVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ AVF_RX_DESC_STATUS_FLM_SHIFT = 11,
+ AVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ AVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ AVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ AVF_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
+ AVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+ AVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define AVF_RXD_QW1_STATUS_SHIFT 0
+#define AVF_RXD_QW1_STATUS_MASK ((BIT(AVF_RX_DESC_STATUS_LAST) - 1) << \
+ AVF_RXD_QW1_STATUS_SHIFT)
+
+#define AVF_RXD_QW1_STATUS_TSYNINDX_SHIFT AVF_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define AVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ AVF_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define AVF_RXD_QW1_STATUS_TSYNVALID_SHIFT AVF_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define AVF_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(AVF_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define AVF_RXD_QW1_STATUS_UMBCAST_SHIFT AVF_RX_DESC_STATUS_UMBCAST_SHIFT
+#define AVF_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
+ AVF_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum avf_rx_desc_fltstat_values {
+ AVF_RX_DESC_FLTSTAT_NO_DATA = 0,
+ AVF_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ AVF_RX_DESC_FLTSTAT_RSV = 2,
+ AVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define AVF_RXD_PACKET_TYPE_UNICAST 0
+#define AVF_RXD_PACKET_TYPE_MULTICAST 1
+#define AVF_RXD_PACKET_TYPE_BROADCAST 2
+#define AVF_RXD_PACKET_TYPE_MIRRORED 3
+
+#define AVF_RXD_QW1_ERROR_SHIFT 19
+#define AVF_RXD_QW1_ERROR_MASK (0xFFUL << AVF_RXD_QW1_ERROR_SHIFT)
+
+enum avf_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_DESC_ERROR_RXE_SHIFT = 0,
+ AVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ AVF_RX_DESC_ERROR_HBO_SHIFT = 2,
+ AVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ AVF_RX_DESC_ERROR_IPE_SHIFT = 3,
+ AVF_RX_DESC_ERROR_L4E_SHIFT = 4,
+ AVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ AVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ AVF_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum avf_rx_desc_error_l3l4e_fcoe_masks {
+ AVF_RX_DESC_ERROR_L3L4E_NONE = 0,
+ AVF_RX_DESC_ERROR_L3L4E_PROT = 1,
+ AVF_RX_DESC_ERROR_L3L4E_FC = 2,
+ AVF_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ AVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define AVF_RXD_QW1_PTYPE_SHIFT 30
+#define AVF_RXD_QW1_PTYPE_MASK (0xFFULL << AVF_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum avf_rx_l2_ptype {
+ AVF_RX_PTYPE_L2_RESERVED = 0,
+ AVF_RX_PTYPE_L2_MAC_PAY2 = 1,
+ AVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ AVF_RX_PTYPE_L2_FIP_PAY2 = 3,
+ AVF_RX_PTYPE_L2_OUI_PAY2 = 4,
+ AVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ AVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ AVF_RX_PTYPE_L2_ECP_PAY2 = 7,
+ AVF_RX_PTYPE_L2_EVB_PAY2 = 8,
+ AVF_RX_PTYPE_L2_QCN_PAY2 = 9,
+ AVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ AVF_RX_PTYPE_L2_ARP = 11,
+ AVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ AVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ AVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ AVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ AVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ AVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ AVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ AVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ AVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ AVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ AVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ AVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ AVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ AVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct avf_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum avf_rx_ptype_outer_ip {
+ AVF_RX_PTYPE_OUTER_L2 = 0,
+ AVF_RX_PTYPE_OUTER_IP = 1
+};
+
+enum avf_rx_ptype_outer_ip_ver {
+ AVF_RX_PTYPE_OUTER_NONE = 0,
+ AVF_RX_PTYPE_OUTER_IPV4 = 0,
+ AVF_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum avf_rx_ptype_outer_fragmented {
+ AVF_RX_PTYPE_NOT_FRAG = 0,
+ AVF_RX_PTYPE_FRAG = 1
+};
+
+enum avf_rx_ptype_tunnel_type {
+ AVF_RX_PTYPE_TUNNEL_NONE = 0,
+ AVF_RX_PTYPE_TUNNEL_IP_IP = 1,
+ AVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ AVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ AVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum avf_rx_ptype_tunnel_end_prot {
+ AVF_RX_PTYPE_TUNNEL_END_NONE = 0,
+ AVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ AVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum avf_rx_ptype_inner_prot {
+ AVF_RX_PTYPE_INNER_PROT_NONE = 0,
+ AVF_RX_PTYPE_INNER_PROT_UDP = 1,
+ AVF_RX_PTYPE_INNER_PROT_TCP = 2,
+ AVF_RX_PTYPE_INNER_PROT_SCTP = 3,
+ AVF_RX_PTYPE_INNER_PROT_ICMP = 4,
+ AVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum avf_rx_ptype_payload_layer {
+ AVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ AVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ AVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ AVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define AVF_RX_PTYPE_BIT_MASK 0x0FFFFFFF
+#define AVF_RX_PTYPE_SHIFT 56
+
+#define AVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define AVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ AVF_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define AVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define AVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ AVF_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define AVF_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define AVF_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(AVF_RXD_QW1_LENGTH_SPH_SHIFT)
+
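Everything a cleanup loop needs from a writeback descriptor sits in qword1: the DD status bit, the packet-buffer length and the ptype. A self-contained sketch (editorial, not part of the patch) that assembles a fabricated qword1 and pulls the fields back out with the masks above:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(a)			(1ULL << (a))

#define AVF_RX_DESC_STATUS_DD_SHIFT	0
#define AVF_RXD_QW1_PTYPE_SHIFT		30
#define AVF_RXD_QW1_PTYPE_MASK		(0xFFULL << AVF_RXD_QW1_PTYPE_SHIFT)
#define AVF_RXD_QW1_LENGTH_PBUF_SHIFT	38
#define AVF_RXD_QW1_LENGTH_PBUF_MASK	(0x3FFFULL << \
					 AVF_RXD_QW1_LENGTH_PBUF_SHIFT)

int main(void)
{
	/* Fabricated writeback qword1: DD set, ptype 24, 66-byte packet. */
	uint64_t qw1 = BIT_ULL(AVF_RX_DESC_STATUS_DD_SHIFT) |
		       (24ULL << AVF_RXD_QW1_PTYPE_SHIFT) |
		       (66ULL << AVF_RXD_QW1_LENGTH_PBUF_SHIFT);

	int done = !!(qw1 & BIT_ULL(AVF_RX_DESC_STATUS_DD_SHIFT));
	unsigned int ptype = (qw1 & AVF_RXD_QW1_PTYPE_MASK) >>
			     AVF_RXD_QW1_PTYPE_SHIFT;
	unsigned int len = (qw1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
			   AVF_RXD_QW1_LENGTH_PBUF_SHIFT;

	printf("dd=%d ptype=%u len=%u\n", done, ptype, len); /* dd=1 ptype=24 len=66 */
	return 0;
}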
+#define AVF_RXD_QW1_NEXTP_SHIFT 38
+#define AVF_RXD_QW1_NEXTP_MASK (0x1FFFULL << AVF_RXD_QW1_NEXTP_SHIFT)
+
+#define AVF_RXD_QW2_EXT_STATUS_SHIFT 0
+#define AVF_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \
+ AVF_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum avf_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ AVF_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ AVF_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ AVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ AVF_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ AVF_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ AVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+#define AVF_RXD_QW2_L2TAG2_SHIFT 0
+#define AVF_RXD_QW2_L2TAG2_MASK (0xFFFFUL << AVF_RXD_QW2_L2TAG2_SHIFT)
+
+#define AVF_RXD_QW2_L2TAG3_SHIFT 16
+#define AVF_RXD_QW2_L2TAG3_MASK (0xFFFFUL << AVF_RXD_QW2_L2TAG3_SHIFT)
+
+enum avf_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ AVF_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ AVF_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ AVF_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ AVF_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ AVF_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ AVF_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ AVF_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ AVF_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define AVF_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define AVF_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define AVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define AVF_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ AVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define AVF_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0
+#define AVF_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \
+ AVF_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define AVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define AVF_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ AVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum avf_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ AVF_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum avf_rx_prog_status_desc_prog_id_masks {
+ AVF_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ AVF_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ AVF_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum avf_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ AVF_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ AVF_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ AVF_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+#define AVF_TWO_BIT_MASK 0x3
+#define AVF_THREE_BIT_MASK 0x7
+#define AVF_FOUR_BIT_MASK 0xF
+#define AVF_EIGHTEEN_BIT_MASK 0x3FFFF
+
+/* TX Descriptor */
+struct avf_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define AVF_TXD_QW1_DTYPE_SHIFT 0
+#define AVF_TXD_QW1_DTYPE_MASK (0xFUL << AVF_TXD_QW1_DTYPE_SHIFT)
+
+enum avf_tx_desc_dtype_value {
+ AVF_TX_DESC_DTYPE_DATA = 0x0,
+ AVF_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ AVF_TX_DESC_DTYPE_CONTEXT = 0x1,
+ AVF_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ AVF_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ AVF_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ AVF_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ AVF_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ AVF_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ AVF_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define AVF_TXD_QW1_CMD_SHIFT 4
+#define AVF_TXD_QW1_CMD_MASK (0x3FFUL << AVF_TXD_QW1_CMD_SHIFT)
+
+enum avf_tx_desc_cmd_bits {
+ AVF_TX_DESC_CMD_EOP = 0x0001,
+ AVF_TX_DESC_CMD_RS = 0x0002,
+ AVF_TX_DESC_CMD_ICRC = 0x0004,
+ AVF_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ AVF_TX_DESC_CMD_DUMMY = 0x0010,
+ AVF_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ AVF_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ AVF_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ AVF_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ AVF_TX_DESC_CMD_FCOET = 0x0080,
+ AVF_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ AVF_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define AVF_TXD_QW1_OFFSET_SHIFT 16
+#define AVF_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ AVF_TXD_QW1_OFFSET_SHIFT)
+
+enum avf_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ AVF_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ AVF_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define AVF_TXD_QW1_MACLEN_MASK (0x7FUL << AVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define AVF_TXD_QW1_IPLEN_MASK (0x7FUL << AVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define AVF_TXD_QW1_L4LEN_MASK (0xFUL << AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define AVF_TXD_QW1_FCLEN_MASK (0xFUL << AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define AVF_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define AVF_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ AVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define AVF_TXD_QW1_L2TAG1_SHIFT 48
+#define AVF_TXD_QW1_L2TAG1_MASK (0xFFFFULL << AVF_TXD_QW1_L2TAG1_SHIFT)
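Taken together, the QW1 macros above pack the descriptor type, command
flags, header offsets, buffer size and L2 tag into the TX descriptor's
single 64-bit cmd_type_offset_bsz word. A minimal sketch of that
composition follows; the helper name, parameters and byte-swap wrapper
are illustrative, not part of this patch, and only the AVF_* macros
come from the header:

/* Sketch: compose a TX data descriptor's cmd_type_offset_bsz word.
 * Helper and parameter names are hypothetical; only the AVF_* macros
 * are defined by this header.
 */
static inline __le64
avf_build_tx_qw1(u64 td_cmd, u64 td_offset, u64 size, u64 l2tag1)
{
	return rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DATA |
				(td_cmd << AVF_TXD_QW1_CMD_SHIFT) |
				(td_offset << AVF_TXD_QW1_OFFSET_SHIFT) |
				(size << AVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
				(l2tag1 << AVF_TXD_QW1_L2TAG1_SHIFT));
}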
+
+/* Context descriptors */
+struct avf_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define AVF_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define AVF_TXD_CTX_QW1_DTYPE_MASK (0xFUL << AVF_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define AVF_TXD_CTX_QW1_CMD_SHIFT 4
+#define AVF_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << AVF_TXD_CTX_QW1_CMD_SHIFT)
+
+enum avf_tx_ctx_desc_cmd_bits {
+ AVF_TX_CTX_DESC_TSO = 0x01,
+ AVF_TX_CTX_DESC_TSYN = 0x02,
+ AVF_TX_CTX_DESC_IL2TAG2 = 0x04,
+ AVF_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ AVF_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ AVF_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ AVF_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ AVF_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ AVF_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define AVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define AVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ AVF_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define AVF_TXD_CTX_QW1_MSS_SHIFT 50
+#define AVF_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ AVF_TXD_CTX_QW1_MSS_SHIFT)
+
+#define AVF_TXD_CTX_QW1_VSI_SHIFT 50
+#define AVF_TXD_CTX_QW1_VSI_MASK (0x1FFULL << AVF_TXD_CTX_QW1_VSI_SHIFT)
+
+#define AVF_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define AVF_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ AVF_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum avf_tx_ctx_desc_eipt_offload {
+ AVF_TX_CTX_EXT_IP_NONE = 0x0,
+ AVF_TX_CTX_EXT_IP_IPV6 = 0x1,
+ AVF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ AVF_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define AVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define AVF_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ AVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define AVF_TXD_CTX_QW0_NATT_SHIFT 9
+#define AVF_TXD_CTX_QW0_NATT_MASK (0x3ULL << AVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define AVF_TXD_CTX_UDP_TUNNELING BIT_ULL(AVF_TXD_CTX_QW0_NATT_SHIFT)
+#define AVF_TXD_CTX_GRE_TUNNELING (0x2ULL << AVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define AVF_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define AVF_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(AVF_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define AVF_TXD_CTX_EIP_NOINC_IPID_CONST AVF_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define AVF_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define AVF_TXD_CTX_QW0_NATLEN_MASK	(0x7FULL << \
+ AVF_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define AVF_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define AVF_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ AVF_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#define AVF_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define AVF_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(AVF_TXD_CTX_QW0_L4T_CS_SHIFT)
+struct avf_nop_desc {
+ __le64 rsvd;
+ __le64 dtype_cmd;
+};
+
+#define AVF_TXD_NOP_QW1_DTYPE_SHIFT 0
+#define AVF_TXD_NOP_QW1_DTYPE_MASK (0xFUL << AVF_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define AVF_TXD_NOP_QW1_CMD_SHIFT 4
+#define AVF_TXD_NOP_QW1_CMD_MASK (0x7FUL << AVF_TXD_NOP_QW1_CMD_SHIFT)
+
+enum avf_tx_nop_desc_cmd_bits {
+ /* Note: These are predefined bit offsets */
+ AVF_TX_NOP_DESC_EOP_SHIFT = 0,
+ AVF_TX_NOP_DESC_RS_SHIFT = 1,
+ AVF_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */
+};
+
+struct avf_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define AVF_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define AVF_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ AVF_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define AVF_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define AVF_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ AVF_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define AVF_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define AVF_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ AVF_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum avf_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ AVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ AVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ AVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ AVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ AVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ AVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ AVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ AVF_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ AVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ AVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ AVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ AVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ AVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ AVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ AVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ AVF_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ AVF_FILTER_PCTYPE_FCOE_OX = 48,
+ AVF_FILTER_PCTYPE_FCOE_RX = 49,
+ AVF_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ AVF_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum avf_filter_program_desc_dest {
+ AVF_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ AVF_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ AVF_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum avf_filter_program_desc_fd_status {
+ AVF_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ AVF_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ AVF_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ AVF_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define AVF_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define AVF_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ AVF_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_DTYPE_SHIFT 0
+#define AVF_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << AVF_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_CMD_SHIFT 4
+#define AVF_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ AVF_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + AVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define AVF_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << AVF_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum avf_filter_program_desc_pcmd {
+ AVF_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ AVF_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define AVF_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + AVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define AVF_TXD_FLTR_QW1_DEST_MASK (0x3ULL << AVF_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + AVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define AVF_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(AVF_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ AVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define AVF_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ AVF_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ AVF_TXD_FLTR_QW1_CMD_SHIFT)
+#define AVF_TXD_FLTR_QW1_ATR_MASK BIT_ULL(AVF_TXD_FLTR_QW1_ATR_SHIFT)
+
+#define AVF_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define AVF_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ AVF_TXD_FLTR_QW1_CNTINDEX_SHIFT)
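With the PCMD, DEST, FD_STATUS and CNTINDEX fields defined above, the
filter programming descriptor's dtype_cmd_cntindex word composes
mechanically. A sketch for an add/update rule that steers matches to a
queue and reports the filter ID (fdir_cmd and fdesc are illustrative
locals, not driver symbols):

/* Sketch: program a flow director add/update rule. fdir_cmd and
 * fdesc are hypothetical; the macros come from the definitions above.
 */
u32 fdir_cmd = AVF_TX_DESC_DTYPE_FILTER_PROG << AVF_TXD_FLTR_QW1_DTYPE_SHIFT;

fdir_cmd |= AVF_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    AVF_TXD_FLTR_QW1_PCMD_SHIFT;
fdir_cmd |= AVF_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    AVF_TXD_FLTR_QW1_DEST_SHIFT;
fdir_cmd |= AVF_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    AVF_TXD_FLTR_QW1_FD_STATUS_SHIFT;
fdesc->dtype_cmd_cntindex = rte_cpu_to_le_32(fdir_cmd);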
+
+enum avf_filter_type {
+ AVF_FLOW_DIRECTOR_FLTR = 0,
+ AVF_PE_QUAD_HASH_FLTR = 1,
+ AVF_ETHERTYPE_FLTR,
+ AVF_FCOE_CTX_FLTR,
+ AVF_MAC_VLAN_FLTR,
+ AVF_HASH_FLTR
+};
+
+struct avf_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct avf_aqc_vsi_properties_data info;
+};
+
+struct avf_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct avf_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct avf_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected per VEB per TC */
+struct avf_veb_tc_stats {
+ u64 tc_rx_packets[AVF_MAX_TRAFFIC_CLASS];
+ u64 tc_rx_bytes[AVF_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_packets[AVF_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_bytes[AVF_MAX_TRAFFIC_CLASS];
+};
+
+/* Statistics collected per function for FCoE */
+struct avf_fcoe_stats {
+ u64 rx_fcoe_packets; /* fcoeprc */
+ u64 rx_fcoe_dwords; /* focedwrc */
+ u64 rx_fcoe_dropped; /* fcoerpdc */
+ u64 tx_fcoe_packets; /* fcoeptc */
+ u64 tx_fcoe_dwords; /* focedwtc */
+ u64 fcoe_bad_fccrc; /* fcoecrc */
+ u64 fcoe_last_error; /* fcoelast */
+ u64 fcoe_ddp_count; /* fcoeddpc */
+};
+
+/* offset to per function FCoE statistics block */
+#define AVF_FCOE_VF_STAT_OFFSET 0
+#define AVF_FCOE_PF_STAT_OFFSET 128
+#define AVF_FCOE_STAT_MAX (AVF_FCOE_PF_STAT_OFFSET + AVF_MAX_PF)
+
+/* Statistics collected by the MAC */
+struct avf_hw_port_stats {
+ /* eth stats collected by the port */
+ struct avf_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
+ /* EEE LPI */
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define AVF_SR_NVM_CONTROL_WORD 0x00
+#define AVF_SR_PCIE_ANALOG_CONFIG_PTR 0x03
+#define AVF_SR_PHY_ANALOG_CONFIG_PTR 0x04
+#define AVF_SR_OPTION_ROM_PTR 0x05
+#define AVF_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
+#define AVF_SR_AUTO_GENERATED_POINTERS_PTR 0x07
+#define AVF_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
+#define AVF_SR_EMP_GLOBAL_MODULE_PTR 0x09
+#define AVF_SR_RO_PCIE_LCB_PTR 0x0A
+#define AVF_SR_EMP_IMAGE_PTR 0x0B
+#define AVF_SR_PE_IMAGE_PTR 0x0C
+#define AVF_SR_CSR_PROTECTED_LIST_PTR 0x0D
+#define AVF_SR_MNG_CONFIG_PTR 0x0E
+#define AVF_EMP_MODULE_PTR 0x0F
+#define AVF_SR_EMP_MODULE_PTR 0x48
+#define AVF_SR_PBA_FLAGS 0x15
+#define AVF_SR_PBA_BLOCK_PTR 0x16
+#define AVF_SR_BOOT_CONFIG_PTR 0x17
+#define AVF_NVM_OEM_VER_OFF 0x83
+#define AVF_SR_NVM_DEV_STARTER_VERSION 0x18
+#define AVF_SR_NVM_WAKE_ON_LAN 0x19
+#define AVF_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define AVF_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
+#define AVF_SR_NVM_MAP_VERSION 0x29
+#define AVF_SR_NVM_IMAGE_VERSION 0x2A
+#define AVF_SR_NVM_STRUCTURE_VERSION 0x2B
+#define AVF_SR_NVM_EETRACK_LO 0x2D
+#define AVF_SR_NVM_EETRACK_HI 0x2E
+#define AVF_SR_VPD_PTR 0x2F
+#define AVF_SR_PXE_SETUP_PTR 0x30
+#define AVF_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
+#define AVF_SR_NVM_ORIGINAL_EETRACK_LO 0x34
+#define AVF_SR_NVM_ORIGINAL_EETRACK_HI 0x35
+#define AVF_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
+#define AVF_SR_POR_REGS_AUTO_LOAD_PTR 0x38
+#define AVF_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
+#define AVF_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
+#define AVF_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define AVF_SR_PHY_ACTIVITY_LIST_PTR 0x3D
+#define AVF_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define AVF_SR_SW_CHECKSUM_WORD 0x3F
+#define AVF_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
+#define AVF_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
+#define AVF_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
+#define AVF_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
+#define AVF_SR_EMP_SR_SETTINGS_PTR 0x48
+#define AVF_SR_FEATURE_CONFIGURATION_PTR 0x49
+#define AVF_SR_CONFIGURATION_METADATA_PTR 0x4D
+#define AVF_SR_IMMEDIATE_VALUES_PTR 0x4E
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define AVF_SR_VPD_MODULE_MAX_SIZE 1024
+#define AVF_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define AVF_SR_CONTROL_WORD_1_SHIFT 0x06
+#define AVF_SR_CONTROL_WORD_1_MASK (0x03 << AVF_SR_CONTROL_WORD_1_SHIFT)
+#define AVF_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
+#define AVF_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
+#define AVF_PTR_TYPE BIT(15)
+
+/* Shadow RAM related */
+#define AVF_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define AVF_SR_BUF_ALIGNMENT 4096
+#define AVF_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define AVF_SR_SW_CHECKSUM_BASE 0xBABA
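The rule above pins the software checksum word down completely: sum
every Shadow RAM word except the checksum word itself, then store
whatever value brings the grand total to 0xBABA. A sketch, where
avf_read_sr_word() and SR_SIZE_IN_WORDS are hypothetical stand-ins for
the real NVM access path:

/* Sketch of the 0xBABA checksum rule; avf_read_sr_word() and
 * SR_SIZE_IN_WORDS are hypothetical.
 */
u16 checksum = 0;
u32 i;

for (i = 0; i < SR_SIZE_IN_WORDS; i++) {
	if (i == AVF_SR_SW_CHECKSUM_WORD)
		continue; /* skip the checksum word itself */
	checksum += avf_read_sr_word(i);
}
/* programming this value makes the full sum come out to 0xBABA */
checksum = (u16)(AVF_SR_SW_CHECKSUM_BASE - checksum);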
+
+#define AVF_SRRD_SRCTL_ATTEMPTS 100000
+
+/* FCoE Tx context descriptor - Use the avf_tx_context_desc struct */
+
+enum avf_fcoe_tx_ctx_desc_cmd_bits {
+ AVF_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */
+ AVF_FCOE_TX_CTX_DESC_RELOFF = 0x10,
+ AVF_FCOE_TX_CTX_DESC_CLRSEQ = 0x20,
+ AVF_FCOE_TX_CTX_DESC_DIFENA = 0x40,
+ AVF_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80
+};
+
+/* FCoE DIF/DIX Context descriptor */
+struct avf_fcoe_difdix_context_desc {
+ __le64 flags_buff0_buff1_ref;
+ __le64 difapp_msk_bias;
+};
+
+#define AVF_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT 0
+#define AVF_FCOE_DIFDIX_CTX_QW0_FLAGS_MASK (0xFFFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT)
+
+enum avf_fcoe_difdix_ctx_desc_flags_bits {
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_RSVD = 0x0000,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGCHK = 0x0000,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGNOTCHK = 0x0004,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_GTYPE_OPAQUE = 0x0000,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY = 0x0008,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPTAG = 0x0010,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPREFTAG = 0x0018,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_REFTYPE_CNST = 0x0000,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_REFTYPE_INC1BLK = 0x0020,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_REFTYPE_APPTAG = 0x0040,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_REFTYPE_RSVD = 0x0060,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIXMODE_XSUM = 0x0000,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIXMODE_CRC = 0x0080,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFHOST_UNTAG = 0x0000,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFHOST_BUF = 0x0100,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFHOST_RSVD = 0x0200,
+ /* 2 BITS */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFHOST_EMBDTAGS = 0x0300,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFLAN_UNTAG = 0x0000,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFLAN_TAG = 0x0400,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFBLK_512B = 0x0000,
+ /* 1 BIT */
+ AVF_FCOE_DIFDIX_CTX_DESC_DIFBLK_4K = 0x0800
+};
+
+#define AVF_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT 12
+#define AVF_FCOE_DIFDIX_CTX_QW0_BUFF0_MASK (0x3FFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT)
+
+#define AVF_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT 22
+#define AVF_FCOE_DIFDIX_CTX_QW0_BUFF1_MASK (0x3FFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT)
+
+#define AVF_FCOE_DIFDIX_CTX_QW0_REF_SHIFT 32
+#define AVF_FCOE_DIFDIX_CTX_QW0_REF_MASK (0xFFFFFFFFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW0_REF_SHIFT)
+
+#define AVF_FCOE_DIFDIX_CTX_QW1_APP_SHIFT 0
+#define AVF_FCOE_DIFDIX_CTX_QW1_APP_MASK (0xFFFFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW1_APP_SHIFT)
+
+#define AVF_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT 16
+#define AVF_FCOE_DIFDIX_CTX_QW1_APP_MSK_MASK (0xFFFFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT)
+
+#define AVF_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT 32
+#define AVF_FCOE_DIFDIX_CTX_QW1_REF_BIAS_MASK	(0xFFFFFFFFULL << \
+ AVF_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT)
+
+/* FCoE DIF/DIX Buffers descriptor */
+struct avf_fcoe_difdix_buffers_desc {
+ __le64 buff_addr0;
+ __le64 buff_addr1;
+};
+
+/* FCoE DDP Context descriptor */
+struct avf_fcoe_ddp_context_desc {
+ __le64 rsvd;
+ __le64 type_cmd_foff_lsize;
+};
+
+#define AVF_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0
+#define AVF_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \
+ AVF_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
+
+#define AVF_FCOE_DDP_CTX_QW1_CMD_SHIFT 4
+#define AVF_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
+ AVF_FCOE_DDP_CTX_QW1_CMD_SHIFT)
+
+enum avf_fcoe_ddp_ctx_desc_cmd_bits {
+ AVF_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */
+ AVF_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */
+ AVF_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */
+ AVF_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */
+ AVF_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */
+ AVF_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */
+};
+
+#define AVF_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16
+#define AVF_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \
+ AVF_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
+
+#define AVF_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32
+#define AVF_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \
+ AVF_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
+
+/* FCoE DDP/DWO Queue Context descriptor */
+struct avf_fcoe_queue_context_desc {
+ __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */
+ __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */
+};
+
+#define AVF_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0
+#define AVF_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \
+ AVF_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
+
+#define AVF_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12
+#define AVF_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \
+ AVF_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
+
+#define AVF_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0
+#define AVF_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \
+ AVF_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
+
+#define AVF_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13
+#define AVF_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \
+					AVF_FCOE_QUEUE_CTX_QW1_TPH_SHIFT)
+
+enum avf_fcoe_queue_ctx_desc_tph_bits {
+ AVF_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1,
+ AVF_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2
+};
+
+#define AVF_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30
+#define AVF_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \
+ AVF_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
+
+/* FCoE DDP/DWO Filter Context descriptor */
+struct avf_fcoe_filter_context_desc {
+ __le32 param;
+ __le16 seqn;
+
+ /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
+ __le16 rsvd_dmaindx;
+
+ /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
+ __le64 flags_rsvd_lanq;
+};
+
+#define AVF_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
+#define AVF_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \
+ AVF_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
+
+enum avf_fcoe_filter_ctx_desc_flags_bits {
+ AVF_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00,
+ AVF_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01,
+ AVF_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00,
+ AVF_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02,
+ AVF_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00,
+ AVF_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04
+};
+
+#define AVF_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0
+#define AVF_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \
+ AVF_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
+
+#define AVF_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
+#define AVF_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \
+ AVF_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
+
+#define AVF_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53
+#define AVF_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \
+ AVF_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
+
+enum avf_switch_element_types {
+ AVF_SWITCH_ELEMENT_TYPE_MAC = 1,
+ AVF_SWITCH_ELEMENT_TYPE_PF = 2,
+ AVF_SWITCH_ELEMENT_TYPE_VF = 3,
+ AVF_SWITCH_ELEMENT_TYPE_EMP = 4,
+ AVF_SWITCH_ELEMENT_TYPE_BMC = 6,
+ AVF_SWITCH_ELEMENT_TYPE_PE = 16,
+ AVF_SWITCH_ELEMENT_TYPE_VEB = 17,
+ AVF_SWITCH_ELEMENT_TYPE_PA = 18,
+ AVF_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum avf_ether_type_index {
+ AVF_ETHER_TYPE_1588 = 0,
+ AVF_ETHER_TYPE_FIP = 1,
+ AVF_ETHER_TYPE_OUI_EXTENDED = 2,
+ AVF_ETHER_TYPE_MAC_CONTROL = 3,
+ AVF_ETHER_TYPE_LLDP = 4,
+ AVF_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ AVF_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ AVF_ETHER_TYPE_QCN_CNM = 7,
+ AVF_ETHER_TYPE_8021X = 8,
+ AVF_ETHER_TYPE_ARP = 9,
+ AVF_ETHER_TYPE_RSV1 = 10,
+ AVF_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define AVF_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum avf_hash_filter_size {
+ AVF_HASH_FILTER_SIZE_1K = 0,
+ AVF_HASH_FILTER_SIZE_2K = 1,
+ AVF_HASH_FILTER_SIZE_4K = 2,
+ AVF_HASH_FILTER_SIZE_8K = 3,
+ AVF_HASH_FILTER_SIZE_16K = 4,
+ AVF_HASH_FILTER_SIZE_32K = 5,
+ AVF_HASH_FILTER_SIZE_64K = 6,
+ AVF_HASH_FILTER_SIZE_128K = 7,
+ AVF_HASH_FILTER_SIZE_256K = 8,
+ AVF_HASH_FILTER_SIZE_512K = 9,
+ AVF_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define AVF_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum avf_dma_cntx_size {
+ AVF_DMA_CNTX_SIZE_512 = 0,
+ AVF_DMA_CNTX_SIZE_1K = 1,
+ AVF_DMA_CNTX_SIZE_2K = 2,
+ AVF_DMA_CNTX_SIZE_4K = 3,
+ AVF_DMA_CNTX_SIZE_8K = 4,
+ AVF_DMA_CNTX_SIZE_16K = 5,
+ AVF_DMA_CNTX_SIZE_32K = 6,
+ AVF_DMA_CNTX_SIZE_64K = 7,
+ AVF_DMA_CNTX_SIZE_128K = 8,
+ AVF_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum avf_hash_lut_size {
+ AVF_HASH_LUT_SIZE_128 = 0,
+ AVF_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold per-PF filter control settings */
+struct avf_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum avf_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum avf_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum avf_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum avf_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum avf_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct avf_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum avf_reset_type {
+ AVF_RESET_POR = 0,
+ AVF_RESET_CORER = 1,
+ AVF_RESET_GLOBR = 2,
+ AVF_RESET_EMPR = 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define AVF_NVM_LLDP_CFG_PTR 0x06
+#define AVF_SR_LLDP_CFG_PTR 0x31
+struct avf_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+/* Offsets into Alternate RAM */
+#define AVF_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define AVF_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define AVF_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define AVF_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define AVF_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define AVF_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate RAM Bandwidth Masks */
+#define AVF_ALT_BW_VALUE_MASK 0xFF
+#define AVF_ALT_BW_RELATIVE_MASK 0x40000000
+#define AVF_ALT_BW_VALID_MASK 0x80000000
+
+/* RSS Hash Table Size */
+#define AVF_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define AVF_L3_SRC_SHIFT 47
+#define AVF_L3_SRC_MASK (0x3ULL << AVF_L3_SRC_SHIFT)
+#define AVF_L3_V6_SRC_SHIFT 43
+#define AVF_L3_V6_SRC_MASK (0xFFULL << AVF_L3_V6_SRC_SHIFT)
+#define AVF_L3_DST_SHIFT 35
+#define AVF_L3_DST_MASK (0x3ULL << AVF_L3_DST_SHIFT)
+#define AVF_L3_V6_DST_SHIFT 35
+#define AVF_L3_V6_DST_MASK (0xFFULL << AVF_L3_V6_DST_SHIFT)
+#define AVF_L4_SRC_SHIFT 34
+#define AVF_L4_SRC_MASK (0x1ULL << AVF_L4_SRC_SHIFT)
+#define AVF_L4_DST_SHIFT 33
+#define AVF_L4_DST_MASK (0x1ULL << AVF_L4_DST_SHIFT)
+#define AVF_VERIFY_TAG_SHIFT 31
+#define AVF_VERIFY_TAG_MASK (0x3ULL << AVF_VERIFY_TAG_SHIFT)
+
+#define AVF_FLEX_50_SHIFT 13
+#define AVF_FLEX_50_MASK (0x1ULL << AVF_FLEX_50_SHIFT)
+#define AVF_FLEX_51_SHIFT 12
+#define AVF_FLEX_51_MASK (0x1ULL << AVF_FLEX_51_SHIFT)
+#define AVF_FLEX_52_SHIFT 11
+#define AVF_FLEX_52_MASK (0x1ULL << AVF_FLEX_52_SHIFT)
+#define AVF_FLEX_53_SHIFT 10
+#define AVF_FLEX_53_MASK (0x1ULL << AVF_FLEX_53_SHIFT)
+#define AVF_FLEX_54_SHIFT 9
+#define AVF_FLEX_54_MASK (0x1ULL << AVF_FLEX_54_SHIFT)
+#define AVF_FLEX_55_SHIFT 8
+#define AVF_FLEX_55_MASK (0x1ULL << AVF_FLEX_55_SHIFT)
+#define AVF_FLEX_56_SHIFT 7
+#define AVF_FLEX_56_MASK (0x1ULL << AVF_FLEX_56_SHIFT)
+#define AVF_FLEX_57_SHIFT 6
+#define AVF_FLEX_57_MASK (0x1ULL << AVF_FLEX_57_SHIFT)
+
+/* Version format for Dynamic Device Personalization (DDP) */
+struct avf_ddp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define AVF_DDP_NAME_SIZE 32
+
+/* Package header */
+struct avf_package_header {
+ struct avf_ddp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct avf_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_NOTES 0x00000002
+#define SEGMENT_TYPE_AVF 0x00000011
+#define SEGMENT_TYPE_X722 0x00000012
+ u32 type;
+ struct avf_ddp_version version;
+ u32 size;
+ char name[AVF_DDP_NAME_SIZE];
+};
+
+struct avf_metadata_segment {
+ struct avf_generic_seg_header header;
+ struct avf_ddp_version version;
+#define AVF_DDP_TRACKID_RDONLY 0
+#define AVF_DDP_TRACKID_INVALID 0xFFFFFFFF
+ u32 track_id;
+ char name[AVF_DDP_NAME_SIZE];
+};
+
+struct avf_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct avf_profile_segment {
+ struct avf_generic_seg_header header;
+ struct avf_ddp_version version;
+ char name[AVF_DDP_NAME_SIZE];
+ u32 device_table_count;
+ struct avf_device_id_entry device_table[1];
+};
+
+struct avf_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct avf_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_RB_MMIO 0x00001800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_RB_AQ 0x00001801
+#define SECTION_TYPE_NOTE 0x80000000
+#define SECTION_TYPE_NAME 0x80000001
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct avf_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct avf_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
+struct avf_profile_info {
+ u32 track_id;
+ struct avf_ddp_version version;
+ u8 op;
+#define AVF_DDP_ADD_TRACKID 0x01
+#define AVF_DDP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[AVF_DDP_NAME_SIZE];
+};
+#endif /* _AVF_TYPE_H_ */
diff --git a/drivers/net/avf/base/virtchnl.h b/drivers/net/avf/base/virtchnl.h
new file mode 100644
index 00000000..167518f0
--- /dev/null
+++ b/drivers/net/avf/base/virtchnl.h
@@ -0,0 +1,787 @@
+/*******************************************************************************
+
+Copyright (c) 2013 - 2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum
+ * of three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+};
+
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and is not considered part of the base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+ VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
+#endif
+#ifdef VIRTCHNL_IWARP
+ VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+#endif
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ {virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}
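Concretely, if struct virtchnl_msg (declared just below) ever drifted
from 20 bytes, VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg) would expand
to

enum virtchnl_static_assert_enum_virtchnl_msg {
	virtchnl_static_assert_virtchnl_msg =
		(20) / ((sizeof(struct virtchnl_msg) == (20)) ? 1 : 0)
};

and the divide by zero in the constant expression stops compilation, so
a wire-format change cannot slip through silently.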
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures.*/
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! The VF driver must delay, then poll the VFGEN_RSTAT
+ * register until reset completion is indicated. The admin queue must be
+ * reinitialized after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF capability flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF		0x00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP		0x00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		0x00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	0x00400000
+
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_cap_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u32 pad1;
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support. If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
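Because the list ends in a one-element flexible array, the VF must size
the indirect buffer for the extra entries before sending. A sketch in
which malloc(), addrs[], vsi_id and n are illustrative; the length math
matches what the validator at the end of this header expects:

/* Sketch: build an ADD_ETH_ADDR message for n addresses. All names
 * other than the virtchnl types are hypothetical.
 */
u16 len = sizeof(struct virtchnl_ether_addr_list) +
	  n * sizeof(struct virtchnl_ether_addr);
struct virtchnl_ether_addr_list *list = malloc(len);
u32 i;

if (list != NULL) {
	list->vsi_id = vsi_id;
	list->num_elements = n;
	for (i = 0; i < n; i++)
		memcpy(list->list[i].addr, addrs[i],
		       VIRTCHNL_ETH_LENGTH_OF_ADDRESS);
	/* send (list, len) as the indirect buffer of
	 * VIRTCHNL_OP_ADD_ETH_ADDR, then free it
	 */
}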
+
+#ifdef VIRTCHNL_SOL_VF_SUPPORT
+/* VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG
+ * VF sends this message to get the default MTU and list of additional ethernet
+ * addresses it is allowed to use.
+ * PF responds with an indirect message containing
+ * virtchnl_addnl_solaris_config with zero or more
+ * virtchnl_ether_addr structures.
+ *
+ * It is expected that this operation will only ever be needed for Solaris VFs
+ * running under a Solaris PF.
+ */
+struct virtchnl_addnl_solaris_config {
+ u16 default_mtu;
+ struct virtchnl_ether_addr_list al;
+};
+
+#endif
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct virtchnl_eth_stats in an external buffer.
+ */
+
+struct virtchnl_eth_stats {
+ u64 rx_bytes; /* received bytes */
+ u64 rx_unicast; /* received unicast pkts */
+ u64 rx_multicast; /* received multicast pkts */
+ u64 rx_broadcast; /* received broadcast pkts */
+ u64 rx_discards;
+ u64 rx_unknown_protocol;
+	u64 tx_bytes;			/* transmitted bytes */
+ u64 tx_unicast; /* transmitted unicast pkts */
+ u64 tx_multicast; /* transmitted multicast pkts */
+ u64 tx_broadcast; /* transmitted broadcast pkts */
+ u64 tx_discards;
+ u64 tx_errors;
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+ struct {
+ enum virtchnl_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+#ifdef VIRTCHNL_IWARP
+
+/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and CEQ attached to it although
+ * there is a single AEQ per VF IWARP instance in which case
+ * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define QUEUE_TYPE_PE_AEQ 0x80
+#define QUEUE_INVALID_IDX 0xFFFF
+
+struct virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+
+struct virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct virtchnl_iwarp_qv_info qv_info[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+
+#endif
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return 0xDEADBEEF, which, when masked,
+ * results in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
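A sketch of the poll loop this state machine implies; rd32(), msleep(),
the register name, mask and attempt bound are hypothetical stand-ins
for the driver's own osdep wrappers:

/* Sketch: wait for VF reset completion. rd32(), msleep(),
 * VFGEN_RSTAT_REG, VFGEN_RSTAT_MASK and MAX_ATTEMPTS are hypothetical.
 */
u32 rstat;
int i;

for (i = 0; i < MAX_ATTEMPTS; i++) {
	rstat = rd32(hw, VFGEN_RSTAT_REG) & VFGEN_RSTAT_MASK;
	if (rstat == VIRTCHNL_VFR_COMPLETED ||
	    rstat == VIRTCHNL_VFR_VFACTIVE)
		break; /* reset done; reinitialize the admin queue */
	/* 0 means in progress; 3 means the hardware is still in reset */
	msleep(10);
}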
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+#ifdef VIRTCHNL_IWARP
+ case VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = true;
+ break;
+ case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ break;
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_iwarp_qvlist_info *qv =
+ (struct virtchnl_iwarp_qvlist_info *)msg;
+ if (qv->num_vectors == 0) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct virtchnl_iwarp_qv_info));
+ }
+ break;
+#endif
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_ERR_PARAM;
+ }
+	/* a few more checks */
+ if (err_msg_format || valid_len != msglen)
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
+#endif /* _VIRTCHNL_H_ */
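As a usage sketch, a PF mailbox handler would run every inbound message
through the validator before casting msg by opcode; the vf context and
send_nak() helper below are illustrative:

/* Sketch: PF-side use of virtchnl_vc_validate_vf_msg(). vf and
 * send_nak() are hypothetical.
 */
int err = virtchnl_vc_validate_vf_msg(&vf->version, v_opcode,
				      msg, msglen);
if (err) {
	send_nak(vf, v_opcode, err); /* reject the malformed message */
	return err;
}
/* length and format check out: safe to cast msg per v_opcode */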
diff --git a/drivers/net/avf/rte_pmd_avf_version.map b/drivers/net/avf/rte_pmd_avf_version.map
new file mode 100644
index 00000000..179140fb
--- /dev/null
+++ b/drivers/net/avf/rte_pmd_avf_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+ local: *;
+};
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index 9b342bfa..dba99120 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -36,7 +36,7 @@
#include <errno.h>
#include <unistd.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -60,6 +60,7 @@
#include "avp_logs.h"
+int avp_logtype_driver;
static int avp_dev_create(struct rte_pci_device *pci_dev,
struct rte_eth_dev *eth_dev);
@@ -2312,3 +2313,12 @@ avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
+
+RTE_INIT(avp_init_log);
+static void
+avp_init_log(void)
+{
+ avp_logtype_driver = rte_log_register("pmd.net.avp.driver");
+ if (avp_logtype_driver >= 0)
+ rte_log_set_level(avp_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/avp/avp_logs.h b/drivers/net/avp/avp_logs.h
index 252cab7d..e29394d5 100644
--- a/drivers/net/avp/avp_logs.h
+++ b/drivers/net/avp/avp_logs.h
@@ -49,11 +49,10 @@
#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
#endif
-#ifdef RTE_LIBRTE_AVP_DEBUG_DRIVER
+extern int avp_logtype_driver;
+
#define PMD_DRV_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, avp_logtype_driver, \
+ "%s(): " fmt, __func__, ## args)
#endif /* _AVP_LOGS_H_ */
diff --git a/drivers/net/avp/rte_avp_common.h b/drivers/net/avp/rte_avp_common.h
index 54437e9a..81dfe5ea 100644
--- a/drivers/net/avp/rte_avp_common.h
+++ b/drivers/net/avp/rte_avp_common.h
@@ -63,6 +63,7 @@
#else
#include <stdint.h>
#include <rte_common.h>
+#include <rte_config.h>
#include <rte_memory.h>
#include <rte_ether.h>
#include <rte_atomic.h>
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 9394f6c5..fb02d0f3 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -1130,9 +1130,7 @@ static void
bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
union eth_rx_cqe *rr_cqe)
{
-#ifdef RTE_LIBRTE_BNX2X_DEBUG
int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
-#endif
int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
struct ecore_queue_sp_obj *q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj;
@@ -7547,9 +7545,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
struct bnx2x_pci_cap *caps;
uint16_t link_status;
-#ifdef RTE_LIBRTE_BNX2X_DEBUG
int reg = 0;
-#endif
/* check if PCI Power Management is enabled */
caps = pci_find_cap(sc, PCIY_PMG, BNX2X_PCI_CAP);
@@ -8137,7 +8133,7 @@ static int bnx2x_get_igu_cam_info(struct bnx2x_softc *sc)
continue;
}
fid = IGU_FID(val);
- if ((fid & IGU_FID_ENCODE_IS_PF)) {
+ if (fid & IGU_FID_ENCODE_IS_PF) {
if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
continue;
}
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 95861a06..483d5a17 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -14,6 +14,9 @@
#include <rte_dev.h>
#include <rte_ethdev_pci.h>
+int bnx2x_logtype_init;
+int bnx2x_logtype_driver;
+
/*
* The set of PCI devices this driver supports
*/
@@ -687,3 +690,15 @@ RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");
+
+RTE_INIT(bnx2x_init_log);
+static void
+bnx2x_init_log(void)
+{
+ bnx2x_logtype_init = rte_log_register("pmd.bnx2x.init");
+ if (bnx2x_logtype_init >= 0)
+ rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE);
+ bnx2x_logtype_driver = rte_log_register("pmd.bnx2x.driver");
+ if (bnx2x_logtype_driver >= 0)
+ rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.h b/drivers/net/bnx2x/bnx2x_ethdev.h
index 967d6dc5..37cac158 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.h
+++ b/drivers/net/bnx2x/bnx2x_ethdev.h
@@ -33,7 +33,7 @@
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_malloc.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_spinlock.h>
#include <rte_eal.h>
#include <rte_mempool.h>
diff --git a/drivers/net/bnx2x/bnx2x_logs.h b/drivers/net/bnx2x/bnx2x_logs.h
index dff014d7..08c1b764 100644
--- a/drivers/net/bnx2x/bnx2x_logs.h
+++ b/drivers/net/bnx2x/bnx2x_logs.h
@@ -11,14 +11,12 @@
#ifndef _PMD_LOGS_H_
#define _PMD_LOGS_H_
+extern int bnx2x_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+ rte_log(RTE_LOG_ ## level, bnx2x_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
-#ifdef RTE_LIBRTE_BNX2X_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while(0)
-#endif
#ifdef RTE_LIBRTE_BNX2X_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -41,12 +39,10 @@
#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
#endif
-#ifdef RTE_LIBRTE_BNX2X_DEBUG
+extern int bnx2x_logtype_driver;
#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, bnx2x_logtype_driver, \
+ "%s(): " fmt, __func__, ## args)
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
@@ -58,5 +54,4 @@
#define PMD_DEBUG_PERIODIC_LOG(level, fmt, args...) do { } while(0)
#endif
-
#endif /* _PMD_LOGS_H_ */
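
The rewritten macros rely on preprocessor token pasting to build the severity constant, so callers pass a bare level name. A sketch of the expansion, using the names from the header above:

/* With the macros above,
 *
 *     PMD_DRV_LOG(INFO, "rc = %d", rc);
 *
 * expands (via PMD_DRV_LOG_RAW, which appends "\n") to
 *
 *     rte_log(RTE_LOG_INFO, bnx2x_logtype_driver,
 *             "%s(): " "rc = %d" "\n", __func__, rc);
 */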
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index ef7f9fea..a75a7fa4 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -3402,7 +3402,7 @@ void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
/* CAM credit is equally divided between all active functions
* on the PORT!.
*/
- if ((func_num > 0)) {
+ if (func_num > 0) {
if (!CHIP_REV_IS_SLOW(sc))
cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
else
@@ -3419,7 +3419,7 @@ void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
* CAM credit is equaly divided between all active functions
* on the PATH.
*/
- if ((func_num > 0)) {
+ if (func_num > 0) {
if (!CHIP_REV_IS_SLOW(sc))
cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
else
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 8ab1c7f8..b5a0badf 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -40,10 +40,11 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
+#include <rte_time.h>
#include "bnxt_cpr.h"
@@ -162,6 +163,7 @@ struct bnxt_link_info {
uint16_t link_speed;
uint16_t support_speeds;
uint16_t auto_link_speed;
+ uint16_t force_link_speed;
uint16_t auto_link_speed_mask;
uint32_t preemphasis;
uint8_t phy_type;
@@ -180,6 +182,53 @@ struct rte_flow {
struct bnxt_vnic_info *vnic;
};
+struct bnxt_ptp_cfg {
+#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
+#define BNXT_GRCPF_REG_SYNC_TIME 0x480
+#define BNXT_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+ struct rte_timecounter tc;
+ struct rte_timecounter tx_tstamp_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct bnxt *bp;
+#define BNXT_MAX_TX_TS 1
+ uint16_t rxctl;
+#define BNXT_PTP_MSG_SYNC (1 << 0)
+#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
+#define BNXT_PTP_MSG_PDELAY_REQ (1 << 2)
+#define BNXT_PTP_MSG_PDELAY_RESP (1 << 3)
+#define BNXT_PTP_MSG_FOLLOW_UP (1 << 8)
+#define BNXT_PTP_MSG_DELAY_RESP (1 << 9)
+#define BNXT_PTP_MSG_PDELAY_RESP_FOLLOW_UP (1 << 10)
+#define BNXT_PTP_MSG_ANNOUNCE (1 << 11)
+#define BNXT_PTP_MSG_SIGNALING (1 << 12)
+#define BNXT_PTP_MSG_MANAGEMENT (1 << 13)
+#define BNXT_PTP_MSG_EVENTS (BNXT_PTP_MSG_SYNC | \
+ BNXT_PTP_MSG_DELAY_REQ | \
+ BNXT_PTP_MSG_PDELAY_REQ | \
+ BNXT_PTP_MSG_PDELAY_RESP)
+ uint8_t tx_tstamp_en:1;
+ int rx_filter;
+
+#define BNXT_PTP_RX_TS_L 0
+#define BNXT_PTP_RX_TS_H 1
+#define BNXT_PTP_RX_SEQ 2
+#define BNXT_PTP_RX_FIFO 3
+#define BNXT_PTP_RX_FIFO_PENDING 0x1
+#define BNXT_PTP_RX_FIFO_ADV 4
+#define BNXT_PTP_RX_REGS 5
+
+#define BNXT_PTP_TX_TS_L 0
+#define BNXT_PTP_TX_TS_H 1
+#define BNXT_PTP_TX_SEQ 2
+#define BNXT_PTP_TX_FIFO 3
+#define BNXT_PTP_TX_FIFO_EMPTY 0x2
+#define BNXT_PTP_TX_REGS 4
+ uint32_t rx_regs[BNXT_PTP_RX_REGS];
+ uint32_t rx_mapped_regs[BNXT_PTP_RX_REGS];
+ uint32_t tx_regs[BNXT_PTP_TX_REGS];
+ uint32_t tx_mapped_regs[BNXT_PTP_TX_REGS];
+};
+
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
struct bnxt {
void *bar0;
@@ -195,10 +244,14 @@ struct bnxt {
#define BNXT_FLAG_JUMBO (1 << 3)
#define BNXT_FLAG_SHORT_CMD (1 << 4)
#define BNXT_FLAG_UPDATE_HASH (1 << 5)
+#define BNXT_FLAG_PTP_SUPPORTED (1 << 6)
+#define BNXT_FLAG_MULTI_HOST (1 << 7)
+#define BNXT_FLAG_INIT_DONE (1 << 31)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
-#define BNXT_NPAR_ENABLED(bp) ((bp)->port_partition_type)
-#define BNXT_NPAR_PF(bp) (BNXT_PF(bp) && BNXT_NPAR_ENABLED(bp))
+#define BNXT_NPAR(bp) ((bp)->port_partition_type)
+#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
+#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
unsigned int rx_nr_rings;
unsigned int rx_cp_nr_rings;
@@ -272,6 +325,7 @@ struct bnxt {
struct bnxt_led_info leds[BNXT_MAX_LED];
uint8_t num_leds;
+ struct bnxt_ptp_cfg *ptp_cfg;
};
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
@@ -281,4 +335,12 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
bool is_bnxt_supported(struct rte_eth_dev *dev);
extern const struct rte_flow_ops bnxt_flow_ops;
+
+extern int bnxt_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, bnxt_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt, ## args)
#endif
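
The rte_timecounter fields added to bnxt_ptp_cfg are driven by the timesync ops later in this patch; the cycle-count-to-nanoseconds conversion follows the standard rte_time.h pattern. A minimal, self-contained sketch of that flow, with the hardware read stubbed out (helper names are hypothetical):

#include <stdint.h>
#include <time.h>
#include <rte_time.h>

#define CC_MASK 0xffffffffffffffffULL	/* same as BNXT_CYCLECOUNTER_MASK */

static uint64_t read_device_cycles(void)
{
	return 123456789ULL;	/* stand-in for a HW register read */
}

static void cycles_to_timespec(struct timespec *ts)
{
	struct rte_timecounter tc = {
		.cc_mask = CC_MASK,
		.cc_shift = 0,
		.nsec_mask = 0,		/* (1ULL << 0) - 1 */
	};
	uint64_t ns = rte_timecounter_update(&tc, read_device_cycles());

	*ts = rte_ns_to_timespec(ns);
}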
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 19c684ca..737bb060 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -57,8 +57,17 @@ void bnxt_handle_async_event(struct bnxt *bp,
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
bnxt_link_update_op(bp->eth_dev, 1);
break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
+ PMD_DRV_LOG(INFO, "Port conn async event\n");
+ break;
default:
- RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id);
+ PMD_DRV_LOG(INFO, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
@@ -74,7 +83,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
int rc;
if (bp->pf.active_vfs <= 0) {
- RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n");
+ PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
return;
}
@@ -93,7 +102,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
if (fw_vf_id < bp->pf.first_vf_id ||
fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
fw_vf_id, bp->pf.first_vf_id,
(bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
@@ -130,7 +139,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
/* Forward */
rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to send FWD req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
@@ -141,7 +150,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
reject:
rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to send REJECT req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
@@ -165,7 +174,6 @@ int bnxt_alloc_def_cp_ring(struct bnxt *bp)
goto err_out;
cpr->cp_doorbell = bp->pdev->mem_resource[2].addr;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
- bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
if (BNXT_PF(bp))
rc = bnxt_hwrm_func_cfg_def_cp(bp);
else
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 3b6813cb..21c46f83 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -35,7 +35,7 @@
#include <stdbool.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
@@ -58,6 +58,7 @@
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
+int bnxt_logtype_driver;
#define PCI_VENDOR_ID_BROADCOM 0x14E4
@@ -201,7 +202,7 @@ alloc_mem_err:
static int bnxt_init_chip(struct bnxt *bp)
{
- unsigned int i, rss_idx, fw_idx;
+ unsigned int i;
struct rte_eth_link new;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -223,25 +224,25 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
goto err_out;
}
rc = bnxt_alloc_hwrm_rings(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
goto err_out;
}
rc = bnxt_alloc_all_hwrm_ring_grps(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
goto err_out;
}
rc = bnxt_mq_rx_configure(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
goto err_out;
}
@@ -251,14 +252,14 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
+ PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
i, rc);
goto err_out;
}
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d ctx alloc failure rc: %x\n",
i, rc);
goto err_out;
@@ -266,39 +267,24 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
+ PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
i, rc);
goto err_out;
}
rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d filter failure rc: %x\n",
i, rc);
goto err_out;
}
- if (vnic->rss_table && vnic->hash_type) {
- /*
- * Fill the RSS hash & redirection table with
- * ring group ids for all VNICs
- */
- for (rss_idx = 0, fw_idx = 0;
- rss_idx < HW_HASH_INDEX_SIZE;
- rss_idx++, fw_idx++) {
- if (vnic->fw_grp_ids[fw_idx] ==
- INVALID_HW_RING_ID)
- fw_idx = 0;
- vnic->rss_table[rss_idx] =
- vnic->fw_grp_ids[fw_idx];
- }
- rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
- if (rc) {
- RTE_LOG(ERR, PMD,
- "HWRM vnic %d set RSS failure rc: %x\n",
- i, rc);
- goto err_out;
- }
+
+ rc = bnxt_vnic_rss_configure(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM vnic set RSS failure rc: %x\n", rc);
+ goto err_out;
}
bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
@@ -310,7 +296,7 @@ static int bnxt_init_chip(struct bnxt *bp)
}
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM cfa l2 rx mask failure rc: %x\n", rc);
goto err_out;
}
@@ -320,10 +306,9 @@ static int bnxt_init_chip(struct bnxt *bp)
!RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = bp->eth_dev->data->nb_rx_queues;
- RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__,
- intr_vector);
+ PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
if (intr_vector > bp->rx_cp_nr_rings) {
- RTE_LOG(ERR, PMD, "At most %d intr queues supported",
+ PMD_DRV_LOG(ERR, "At most %d intr queues supported",
bp->rx_cp_nr_rings);
return -ENOTSUP;
}
@@ -337,13 +322,13 @@ static int bnxt_init_chip(struct bnxt *bp)
bp->eth_dev->data->nb_rx_queues *
sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues"
+ PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
" intr_vec", bp->eth_dev->data->nb_rx_queues);
return -ENOMEM;
}
- RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p "
+ PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
- __func__, intr_handle->intr_vec, intr_handle->nb_efd,
+ intr_handle->intr_vec, intr_handle->nb_efd,
intr_handle->max_intr);
}
@@ -359,14 +344,14 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
goto err_out;
}
if (!bp->link_info.link_up) {
rc = bnxt_set_hwrm_link_config(bp, true);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM link config failure rc: %x\n", rc);
goto err_out;
}
@@ -378,6 +363,10 @@ static int bnxt_init_chip(struct bnxt *bp)
err_out:
bnxt_free_all_hwrm_resources(bp);
+ /* Some of the error status returned by FW may not be from errno.h */
+ if (rc > 0)
+ rc = -EIO;
+
return rc;
}
@@ -393,7 +382,10 @@ static int bnxt_init_nic(struct bnxt *bp)
{
int rc;
- bnxt_init_ring_grps(bp);
+ rc = bnxt_init_ring_grps(bp);
+ if (rc)
+ return rc;
+
bnxt_init_vnics(bp);
bnxt_init_filters(bp);
@@ -523,6 +515,26 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
bp->tx_queues = (void *)eth_dev->data->tx_queues;
/* Inherit new configurations */
+ if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
+ eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
+ eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues + 1 >
+ bp->max_cp_rings ||
+ eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
+ bp->max_stat_ctx ||
+ (uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
+ PMD_DRV_LOG(ERR,
+ "Insufficient resources to support requested config\n");
+ PMD_DRV_LOG(ERR,
+ "Num Queues Requested: Tx %d, Rx %d\n",
+ eth_dev->data->nb_tx_queues,
+ eth_dev->data->nb_rx_queues);
+ PMD_DRV_LOG(ERR,
+ "Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n",
+ bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
+ bp->max_stat_ctx, bp->max_ring_grps);
+ return -ENOSPC;
+ }
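+ /*
+  * Worked example for the capability check above (hypothetical
+  * numbers): 8 Rx and 8 Tx queues need 8 + 8 + 1 = 17 completion
+  * rings (one extra for the default/async ring), 16 stat contexts
+  * and 8 + 1 = 9 ring groups; any smaller max_* value rejects the
+  * configuration with -ENOSPC.
+  */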
+
bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
bp->rx_cp_nr_rings = bp->rx_nr_rings;
@@ -540,13 +552,13 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
struct rte_eth_link *link = &eth_dev->data->dev_link;
if (link->link_status)
- RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
+ PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- RTE_LOG(INFO, PMD, "Port %d Link Down\n",
+ PMD_DRV_LOG(INFO, "Port %d Link Down\n",
eth_dev->data->port_id);
}
@@ -563,7 +575,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
int rc;
if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
}
@@ -583,6 +595,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
if (rc)
goto error;
+ bp->flags |= BNXT_FLAG_INIT_DONE;
return 0;
error:
@@ -628,6 +641,7 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
}
bnxt_set_hwrm_link_config(bp, false);
bnxt_hwrm_port_clr_stats(bp);
+ bp->flags &= ~BNXT_FLAG_INIT_DONE;
bnxt_shutdown_nic(bp);
bp->dev_stopped = 1;
}
@@ -700,25 +714,25 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
struct bnxt_filter_info *filter;
if (BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
+ PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
return -ENOTSUP;
}
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
+ PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
return -EINVAL;
}
/* Attach requested MAC address to the new l2_filter */
STAILQ_FOREACH(filter, &vnic->filter, next) {
if (filter->mac_index == index) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC addr already existed for pool %d\n", pool);
- return -EINVAL;
+ return 0;
}
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
return -ENODEV;
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
@@ -741,7 +755,7 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
if (rc) {
new.link_speed = ETH_LINK_SPEED_100M;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
}
@@ -832,7 +846,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
return -EINVAL;
if (reta_size != HW_HASH_INDEX_SIZE) {
- RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
@@ -864,12 +878,12 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
return -EINVAL;
if (reta_size != HW_HASH_INDEX_SIZE) {
- RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
}
- /* EW - need to revisit here copying from u64 to u16 */
+ /* EW - need to revisit here copying from uint64_t to uint16_t */
memcpy(reta_conf, vnic->rss_table, reta_size);
if (rte_intr_allow_others(intr_handle)) {
@@ -895,7 +909,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
*/
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
- RTE_LOG(ERR, PMD, "Hash type NONE\n");
+ PMD_DRV_LOG(ERR, "Hash type NONE\n");
} else {
if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
return -EINVAL;
@@ -984,7 +998,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
if (hash_types) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unknwon RSS config from firmware (%08x), RSS disabled",
vnic->hash_type);
return -ENOTSUP;
@@ -1032,8 +1046,8 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
+ if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
+ PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
return -ENOTSUP;
}
@@ -1093,10 +1107,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
- RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->vxlan_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->vxlan_port_cnt++;
@@ -1108,10 +1122,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (bp->geneve_port_cnt) {
- RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->geneve_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->geneve_port_cnt++;
@@ -1122,7 +1136,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
bp->geneve_port_cnt++;
break;
default:
- RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}
rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
@@ -1142,11 +1156,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
- RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->vxlan_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->vxlan_port);
return -EINVAL;
}
@@ -1159,11 +1173,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (!bp->geneve_port_cnt) {
- RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->geneve_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->geneve_port);
return -EINVAL;
}
@@ -1175,7 +1189,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
port = bp->geneve_fw_dst_port_id;
break;
default:
- RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}
@@ -1232,7 +1246,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@@ -1250,7 +1264,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Del Vlan filter for %d\n",
vlan_id);
}
@@ -1305,7 +1319,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
}
new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@@ -1325,7 +1339,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Added Vlan filter for %d\n", vlan_id);
cont:
filter = temp_filter;
@@ -1360,7 +1374,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
for (i = 0; i < 4095; i++)
bnxt_del_vlan_filter(bp, i);
}
- RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
+ PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_filter);
}
@@ -1374,12 +1388,12 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
vnic->vlan_strip = false;
bnxt_hwrm_vnic_cfg(bp, vnic);
}
- RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
+ PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_strip);
}
if (mask & ETH_VLAN_EXTEND_MASK)
- RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
+ PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");
return 0;
}
@@ -1397,7 +1411,6 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
return;
memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
- memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
STAILQ_FOREACH(filter, &vnic->filter, next) {
/* Default Filter is at Index 0 */
@@ -1416,7 +1429,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
if (rc)
break;
filter->mac_index = 0;
- RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
+ PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
}
}
@@ -1519,7 +1532,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
- RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
+ PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
ETHER_MIN_MTU, max_dev_mtu);
return -EINVAL;
}
@@ -1537,7 +1550,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
eth_dev->data->mtu = new_mtu;
- RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);
+ PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
@@ -1563,8 +1576,8 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
uint16_t vlan = bp->vlan;
int rc;
- if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD,
+ if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
+ PMD_DRV_LOG(ERR,
"PVID cannot be modified for this function\n");
return -ENOTSUP;
}
@@ -1723,15 +1736,15 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
int match = 0;
*ret = 0;
- if (efilter->ether_type != ETHER_TYPE_IPv4 &&
- efilter->ether_type != ETHER_TYPE_IPv6) {
- RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in"
+ if (efilter->ether_type == ETHER_TYPE_IPv4 ||
+ efilter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
" ethertype filter.", efilter->ether_type);
*ret = -EINVAL;
goto exit;
}
if (efilter->queue >= bp->rx_nr_rings) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@@ -1739,7 +1752,7 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
if (vnic == NULL) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@@ -1790,7 +1803,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
return 0;
if (arg == NULL) {
- RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@@ -1807,7 +1820,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@@ -1851,11 +1864,11 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
next);
bnxt_free_filter(bp, filter1);
} else if (ret == 0) {
- RTE_LOG(ERR, PMD, "No matching filter found\n");
+ PMD_DRV_LOG(ERR, "No matching filter found\n");
}
break;
default:
- RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
goto error;
}
@@ -1874,7 +1887,7 @@ parse_ntuple_filter(struct bnxt *bp,
uint32_t en = 0;
if (nfilter->queue >= bp->rx_nr_rings) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
return -EINVAL;
}
@@ -1886,7 +1899,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid dst_port mask.");
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
return -EINVAL;
}
@@ -1904,7 +1917,7 @@ parse_ntuple_filter(struct bnxt *bp,
en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
break;
default:
- RTE_LOG(ERR, PMD, "invalid protocol mask.");
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
return -EINVAL;
}
@@ -1916,7 +1929,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
return -EINVAL;
}
@@ -1928,7 +1941,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid src_ip mask.");
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
return -EINVAL;
}
@@ -1940,7 +1953,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid src_port mask.");
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
return -EINVAL;
}
@@ -1953,7 +1966,8 @@ parse_ntuple_filter(struct bnxt *bp,
static struct bnxt_filter_info*
bnxt_match_ntuple_filter(struct bnxt *bp,
- struct bnxt_filter_info *bfilter)
+ struct bnxt_filter_info *bfilter,
+ struct bnxt_vnic_info **mvnic)
{
struct bnxt_filter_info *mfilter = NULL;
int i;
@@ -1972,8 +1986,11 @@ bnxt_match_ntuple_filter(struct bnxt *bp,
bfilter->dst_port == mfilter->dst_port &&
bfilter->dst_port_mask == mfilter->dst_port_mask &&
bfilter->flags == mfilter->flags &&
- bfilter->enables == mfilter->enables)
+ bfilter->enables == mfilter->enables) {
+ if (mvnic)
+ *mvnic = vnic;
return mfilter;
+ }
}
}
return NULL;
@@ -1985,22 +2002,22 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
enum rte_filter_op filter_op)
{
struct bnxt_filter_info *bfilter, *mfilter, *filter1;
- struct bnxt_vnic_info *vnic, *vnic0;
+ struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
int ret;
if (nfilter->flags != RTE_5TUPLE_FLAGS) {
- RTE_LOG(ERR, PMD, "only 5tuple is supported.");
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
return -EINVAL;
}
if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
- RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
+ PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
return -EINVAL;
}
bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@@ -2023,15 +2040,25 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
bfilter->ethertype = 0x800;
bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
- mfilter = bnxt_match_ntuple_filter(bp, bfilter);
+ mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
- if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) {
- RTE_LOG(ERR, PMD, "filter exists.");
+ if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
+ bfilter->dst_id == mfilter->dst_id) {
+ PMD_DRV_LOG(ERR, "filter exists.\n");
ret = -EEXIST;
goto free_filter;
+ } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
+ bfilter->dst_id != mfilter->dst_id) {
+ mfilter->dst_id = vnic->fw_vnic_id;
+ ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
+ STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
+ STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
+ PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
+ PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
+ goto free_filter;
}
if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
- RTE_LOG(ERR, PMD, "filter doesn't exist.");
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
ret = -ENOENT;
goto free_filter;
}
@@ -2050,11 +2077,11 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
}
ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
- STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info,
- next);
+ STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
bnxt_free_filter(bp, mfilter);
- bfilter->fw_l2_filter_id = -1;
+ mfilter->fw_l2_filter_id = -1;
bnxt_free_filter(bp, bfilter);
+ bfilter->fw_l2_filter_id = -1;
}
return 0;
@@ -2076,7 +2103,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
return 0;
if (arg == NULL) {
- RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@@ -2093,7 +2120,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
filter_op);
break;
default:
- RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
break;
}
@@ -2295,7 +2322,7 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
if (vnic == NULL) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
return -EINVAL;
}
@@ -2399,7 +2426,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
/* FALLTHROUGH */
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new flow.\n");
return -ENOMEM;
}
@@ -2411,12 +2438,12 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
match = bnxt_match_fdir(bp, filter);
if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
- RTE_LOG(ERR, PMD, "Flow already exists.\n");
+ PMD_DRV_LOG(ERR, "Flow already exists.\n");
ret = -EEXIST;
goto free_filter;
}
if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
- RTE_LOG(ERR, PMD, "Flow does not exist.\n");
+ PMD_DRV_LOG(ERR, "Flow does not exist.\n");
ret = -ENOENT;
goto free_filter;
}
@@ -2463,10 +2490,10 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_STATS:
case RTE_ETH_FILTER_INFO:
/* FALLTHROUGH */
- RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
+ PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
break;
default:
- RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
ret = -EINVAL;
break;
}
@@ -2487,7 +2514,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
switch (filter_type) {
case RTE_ETH_FILTER_TUNNEL:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"filter type: %d: To be implemented\n", filter_type);
break;
case RTE_ETH_FILTER_FDIR:
@@ -2505,7 +2532,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
*(const void **)arg = &bnxt_flow_ops;
break;
default:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Filter type (%d) not supported", filter_type);
ret = -EINVAL;
break;
@@ -2536,7 +2563,260 @@ bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
return NULL;
}
+static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
+ int reg_win)
+{
+ uint32_t reg_base = *reg_arr & 0xfffff000;
+ uint32_t win_off;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if ((reg_arr[i] & 0xfffff000) != reg_base)
+ return -ERANGE;
+ }
+ win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
+ rte_write32(rte_cpu_to_le_32(reg_base), (uint8_t *)bp->bar0 + win_off);
+ return 0;
+}
+
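+ /*
+  * Example (illustrative values): if a PTP register sits at GRC
+  * offset 0x3f50, reg_base is 0x3000; bnxt_map_regs() writes that
+  * base into the window register, which maps the 4KB page at BAR0
+  * offset reg_win * 0x1000. The register is then reachable at
+  * reg_win * 0x1000 + (0x3f50 & 0xfff), i.e. 0x5f50 for window 5.
+  */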
+static int bnxt_map_ptp_regs(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t *reg_arr;
+ int rc, i;
+
+ reg_arr = ptp->rx_regs;
+ rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
+ if (rc)
+ return rc;
+
+ reg_arr = ptp->tx_regs;
+ rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < BNXT_PTP_RX_REGS; i++)
+ ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
+
+ for (i = 0; i < BNXT_PTP_TX_REGS; i++)
+ ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
+ return 0;
+}
+
+static void bnxt_unmap_ptp_regs(struct bnxt *bp)
+{
+ rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
+ rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
+}
+
+static uint64_t bnxt_cc_read(struct bnxt *bp)
+{
+ uint64_t ns;
+
+ ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_SYNC_TIME));
+ ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
+ return ns;
+}
+
+static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t fifo;
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
+ if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
+ return -EAGAIN;
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
+ *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
+ *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
+
+ return 0;
+}
+
+static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_pf_info *pf = &bp->pf;
+ uint16_t port_id;
+ uint32_t fifo;
+
+ if (!ptp)
+ return -ENODEV;
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
+ if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
+ return -EAGAIN;
+
+ port_id = pf->port_id;
+ rte_write32(rte_cpu_to_le_32(1 << port_id), (uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
+ if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
+ /* bnxt_clr_rx_ts(bp); TBD */
+ return -EBUSY;
+ }
+
+ *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
+ *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
+
+ return 0;
+}
+
+static int
+bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ ns = rte_timespec_to_ns(ts);
+ /* Set the timecounters to a new value. */
+ ptp->tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ systime_cycles = bnxt_cc_read(bp);
+ ns = rte_timecounter_update(&ptp->tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+bnxt_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t shift = 0;
+
+ if (!ptp)
+ return 0;
+
+ ptp->rx_filter = 1;
+ ptp->tx_tstamp_en = 1;
+ ptp->rxctl = BNXT_PTP_MSG_EVENTS;
+
+ if (!bnxt_hwrm_ptp_cfg(bp))
+ bnxt_map_ptp_regs(bp);
+
+ memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
+ memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+ ptp->tc.cc_shift = shift;
+ ptp->tc.nsec_mask = (1ULL << shift) - 1;
+
+ ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+ ptp->rx_tstamp_tc.cc_shift = shift;
+ ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+ ptp->tx_tstamp_tc.cc_shift = shift;
+ ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ return 0;
+}
+
+static int
+bnxt_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ ptp->rx_filter = 0;
+ ptp->tx_tstamp_en = 0;
+ ptp->rxctl = 0;
+
+ bnxt_hwrm_ptp_cfg(bp);
+
+ bnxt_unmap_ptp_regs(bp);
+
+ return 0;
+}
+
+static int
+bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint64_t rx_tstamp_cycles = 0;
+ uint64_t ns;
+
+ if (!ptp)
+ return 0;
+
+ bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
+ ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+ return 0;
+}
+
+static int
+bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint64_t tx_tstamp_cycles = 0;
+ uint64_t ns;
+
+ if (!ptp)
+ return 0;
+
+ bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
+ ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ ptp->tc.nsec += delta;
+
+ return 0;
+}
static int
bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
@@ -2546,8 +2826,8 @@ bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
uint32_t dir_entries;
uint32_t entry_length;
- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
- __func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
+ bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);
rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
@@ -2565,8 +2845,8 @@ bnxt_get_eeprom_op(struct rte_eth_dev *dev,
uint32_t index;
uint32_t offset;
- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
- "len = %d\n", __func__, bp->pdev->addr.domain,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
@@ -2634,13 +2914,13 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
uint8_t index, dir_op;
uint16_t type, ext, ordinal, attr;
- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
- "len = %d\n", __func__, bp->pdev->addr.domain,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
+ PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
return -EINVAL;
}
@@ -2727,11 +3007,22 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.rx_queue_count = bnxt_rx_queue_count_op,
.rx_descriptor_status = bnxt_rx_descriptor_status_op,
.tx_descriptor_status = bnxt_tx_descriptor_status_op,
+ .rx_queue_start = bnxt_rx_queue_start,
+ .rx_queue_stop = bnxt_rx_queue_stop,
+ .tx_queue_start = bnxt_tx_queue_start,
+ .tx_queue_stop = bnxt_tx_queue_stop,
.filter_ctrl = bnxt_filter_ctrl_op,
.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
.get_eeprom_length = bnxt_get_eeprom_length_op,
.get_eeprom = bnxt_get_eeprom_op,
.set_eeprom = bnxt_set_eeprom_op,
+ .timesync_enable = bnxt_timesync_enable,
+ .timesync_disable = bnxt_timesync_disable,
+ .timesync_read_time = bnxt_timesync_read_time,
+ .timesync_write_time = bnxt_timesync_write_time,
+ .timesync_adjust_time = bnxt_timesync_adjust_time,
+ .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
};
static bool bnxt_vf_pciid(uint16_t id)
@@ -2754,7 +3045,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
/* enable device (incl. PCI PM wakeup), and bus-mastering */
if (!pci_dev->mem_resource[0].addr) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto init_err_disable;
@@ -2765,7 +3056,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
if (!bp->bar0) {
- RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
+ PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
rc = -ENOMEM;
goto init_err_release;
}
@@ -2801,7 +3092,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
int rc;
if (version_printed++ == 0)
- RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
+ PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -2818,7 +3109,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
rc = bnxt_init_board(eth_dev);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Board initialization failed rc: %x\n", rc);
goto error;
}
@@ -2849,13 +3140,13 @@ skip_init:
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@@ -2884,13 +3175,13 @@ skip_init:
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@@ -2905,45 +3196,71 @@ skip_init:
rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm resource allocation failure rc: %x\n", rc);
goto error_free;
}
rc = bnxt_hwrm_ver_get(bp);
if (rc)
goto error_free;
- bnxt_hwrm_queue_qportcfg(bp);
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
+ goto error_free;
+ }
- bnxt_hwrm_func_qcfg(bp);
+ rc = bnxt_hwrm_func_qcfg(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
+ goto error_free;
+ }
/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
goto error_free;
}
if (bp->max_tx_rings == 0) {
- RTE_LOG(ERR, PMD, "No TX rings available!\n");
+ PMD_DRV_LOG(ERR, "No TX rings available!\n");
rc = -EBUSY;
goto error_free;
}
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
if (eth_dev->data->mac_addrs == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx);
rc = -ENOMEM;
goto error_free;
}
+
+ if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
+ bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
+ bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
+ bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
+ rc = -EINVAL;
+ goto error_free;
+ }
/* Copy the permanent MAC from the qcap response address now. */
memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+
+ if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
+ /* 1 ring is for default completion ring */
+ PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
+ rc = -ENOSPC;
+ goto error_free;
+ }
+
bp->grp_info = rte_zmalloc("bnxt_grp_info",
sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
if (!bp->grp_info) {
- RTE_LOG(ERR, PMD,
- "Failed to alloc %zu bytes needed to store group info table\n",
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc %zu bytes to store group info table\n",
sizeof(*bp->grp_info) * bp->max_ring_grps);
rc = -ENOMEM;
goto error_free;
@@ -2955,7 +3272,7 @@ skip_init:
((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
} else {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Firmware too old for VF mailbox functionality\n");
memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
}
@@ -2975,21 +3292,21 @@ skip_init:
ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
rc = bnxt_hwrm_func_driver_register(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to register driver");
rc = -EBUSY;
goto error_free;
}
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);
rc = bnxt_hwrm_func_reset(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
- rc = -1;
+ PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
+ rc = -EIO;
goto error_free;
}
@@ -3000,13 +3317,13 @@ skip_init:
if (bp->pdev->max_vfs) {
rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
+ PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
goto error_free;
}
} else {
rc = bnxt_hwrm_allocate_pf_only(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to allocate PF resources\n");
goto error_free;
}
@@ -3115,6 +3432,15 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &bnxt_rte_pmd);
}
+RTE_INIT(bnxt_init_log);
+static void
+bnxt_init_log(void)
+{
+ bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
+ if (bnxt_logtype_driver >= 0)
+ rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
+}
+
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
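
With the timesync_* ops wired into bnxt_dev_ops above, the generic ethdev IEEE 1588 API works against this PMD. A minimal application-side sketch (port 0 assumed started; error handling mostly elided):

#include <stdio.h>
#include <time.h>
#include <rte_ethdev.h>

static void ptp_demo(void)
{
	struct timespec ts;

	rte_eth_timesync_enable(0);
	if (rte_eth_timesync_read_time(0, &ts) == 0)
		printf("dev time: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	rte_eth_timesync_adjust_time(0, 1000);	/* nudge clock by +1 us */
	rte_eth_timesync_disable(0);
}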
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 65d30fb3..032e8eed 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -56,7 +56,7 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
- RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@@ -77,7 +77,7 @@ struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
if (!filter) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
vf);
return NULL;
}
@@ -145,11 +145,11 @@ void bnxt_free_filter_mem(struct bnxt *bp)
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
- RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
+ PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
/* Call HWRM to try to free filter again */
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM filter cannot be freed rc = %d\n",
rc);
}
@@ -172,7 +172,7 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
max_filters * sizeof(struct bnxt_filter_info),
0);
if (filter_mem == NULL) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
max_filters);
return -ENOMEM;
}
@@ -187,7 +187,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
- RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@@ -250,7 +250,7 @@ nxt_non_void_action(const struct rte_flow_action *cur)
}
}
-static inline int check_zero_bytes(const uint8_t *bytes, int len)
+int check_zero_bytes(const uint8_t *bytes, int len)
{
int i;
for (i = 0; i < len; i++)
@@ -281,7 +281,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
/* FALLTHROUGH */
/* need ntuple match, reset exact match */
if (!use_ntuple) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VLAN flow cannot use NTUPLE filter\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -292,7 +292,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
use_ntuple |= 1;
break;
default:
- RTE_LOG(ERR, PMD, "Unknown Flow type");
+ PMD_DRV_LOG(ERR, "Unknown Flow type");
use_ntuple |= 1;
}
item++;
@@ -329,7 +329,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
int dflt_vnic;
use_ntuple = bnxt_filter_type_check(pattern, error);
- RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
+ PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
if (use_ntuple < 0)
return use_ntuple;
@@ -791,7 +791,7 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
return f0;
/* This flow needs a DST MAC that is not the same as the port/L2 MAC */
- RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
+ PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
filter1 = bnxt_get_unused_filter(bp);
if (filter1 == NULL)
return NULL;
@@ -806,7 +806,6 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
bnxt_free_filter(bp, filter1);
return NULL;
}
- STAILQ_INSERT_TAIL(&vnic->filter, filter1, next);
return filter1;
}
@@ -829,7 +828,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
int rc;
if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
- RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
+ PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"Cannot create flow on RSS queues");
@@ -858,7 +857,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
rc = -rte_errno;
goto ret;
}
- RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);
+ PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
@@ -876,7 +875,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- RTE_LOG(DEBUG, PMD, "VNIC found\n");
+ PMD_DRV_LOG(DEBUG, "VNIC found\n");
break;
case RTE_FLOW_ACTION_TYPE_DROP:
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
@@ -957,7 +956,11 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
-//done:
+ if (filter1) {
+ bnxt_free_filter(bp, filter1);
+ filter1->fw_l2_filter_id = -1;
+ }
+
act = nxt_non_void_action(++act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
rte_flow_error_set(error, EINVAL,
@@ -987,7 +990,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
return -ENOMEM;
}
@@ -1042,8 +1045,23 @@ bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
!memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
sizeof(nf->dst_ipaddr)) &&
!memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
- sizeof(nf->dst_ipaddr_mask)))
- return -EEXIST;
+ sizeof(nf->dst_ipaddr_mask))) {
+ if (mf->dst_id == nf->dst_id)
+ return -EEXIST;
+ /* Same flow, different queue:
+ * clear the old ntuple filter
+ */
+ if (nf->filter_type == HWRM_CFA_EM_FILTER)
+ bnxt_hwrm_clear_em_filter(bp, mf);
+ if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ bnxt_hwrm_clear_ntuple_filter(bp, mf);
+ /* Free the old filter and update the flow
+ * with the new filter
+ */
+ bnxt_free_filter(bp, mf);
+ flow->filter = nf;
+ return -EXDEV;
+ }
}
}
return 0;
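
For reference, the hunk above gives bnxt_match_filter() a three-way contract: 0 (no conflict), -EEXIST (identical flow and destination), -EXDEV (same pattern, different destination queue; the old hardware filter has already been cleared). A standalone stand-in for that contract, assuming hypothetical same_pattern/same_dest inputs (illustrative only, not driver code):

#include <errno.h>
#include <stdio.h>

static int match_result(int same_pattern, int same_dest)
{
	if (!same_pattern)
		return 0;         /* no conflict: create a new flow */
	if (same_dest)
		return -EEXIST;   /* exact duplicate: reject */
	return -EXDEV;            /* same pattern, new queue: update flow */
}

int main(void)
{
	printf("%d %d %d\n", match_result(0, 0),
	       match_result(1, 1), match_result(1, 0));
	return 0;
}

bnxt_flow_create() below branches on exactly these three values.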
@@ -1059,6 +1077,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt_filter_info *filter;
struct bnxt_vnic_info *vnic = NULL;
+ bool update_flow = false;
struct rte_flow *flow;
unsigned int i;
int ret = 0;
@@ -1073,13 +1092,13 @@ bnxt_flow_create(struct rte_eth_dev *dev,
ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Not a validate flow.\n");
+ PMD_DRV_LOG(ERR, "Not a validate flow.\n");
goto free_flow;
}
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
goto free_flow;
}
@@ -1089,9 +1108,17 @@ bnxt_flow_create(struct rte_eth_dev *dev,
goto free_filter;
ret = bnxt_match_filter(bp, filter);
- if (ret != 0) {
- RTE_LOG(DEBUG, PMD, "Flow already exists.\n");
+ if (ret == -EEXIST) {
+ PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
+ /* Clear the filter that was created as part of
+ * validate_and_parse_flow() above
+ */
+ bnxt_hwrm_clear_l2_filter(bp, filter);
goto free_filter;
+ } else if (ret == -EXDEV) {
+ PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
+ PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
+ update_flow = true;
}
if (filter->filter_type == HWRM_CFA_EM_FILTER) {
@@ -1114,22 +1141,29 @@ bnxt_flow_create(struct rte_eth_dev *dev,
if (!ret) {
flow->filter = filter;
flow->vnic = vnic;
- RTE_LOG(ERR, PMD, "Successfully created flow.\n");
+ if (update_flow) {
+ ret = -EXDEV;
+ goto free_flow;
+ }
+ PMD_DRV_LOG(ERR, "Successfully created flow.\n");
STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
return flow;
}
free_filter:
- filter->fw_l2_filter_id = -1;
bnxt_free_filter(bp, filter);
free_flow:
if (ret == -EEXIST)
rte_flow_error_set(error, ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Matching Flow exists.");
+ else if (ret == -EXDEV)
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Flow with pattern exists, updating destination queue");
else
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to create flow.");
+ "Failed to create flow.");
rte_free(flow);
flow = NULL;
return flow;
@@ -1147,12 +1181,13 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
ret = bnxt_match_filter(bp, filter);
if (ret == 0)
- RTE_LOG(ERR, PMD, "Could not find matching flow\n");
+ PMD_DRV_LOG(ERR, "Could not find matching flow\n");
if (filter->filter_type == HWRM_CFA_EM_FILTER)
ret = bnxt_hwrm_clear_em_filter(bp, filter);
if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
if (!ret) {
STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
rte_free(flow);
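
For context, these create/destroy paths sit behind the generic rte_flow API. A minimal application-side sketch, assuming port 0 is a bnxt port with queue 1 configured (the pattern and queue index are example choices; per the hunk above, a QUEUE action is rejected on this PMD when RSS mq_mode is enabled):

#include <rte_flow.h>

static struct rte_flow *steer_udp_to_queue1(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* dry-run first, then program the hardware filter */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}

Tearing the flow down with rte_flow_destroy() lands in bnxt_flow_destroy() above, which now also clears the backing L2 filter.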
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index 2591a87e..a3c702df 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -97,6 +97,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp);
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp,
struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic);
+int check_zero_bytes(const uint8_t *bytes, int len);
#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d2c800dd..b7843afe 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -79,7 +79,7 @@ static int page_getenum(size_t size)
return 22;
if (size <= 1 << 30)
return 30;
- RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
+ PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
return sizeof(void *) * 8 - 1;
}
@@ -161,7 +161,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
}
if (i >= HWRM_CMD_TIMEOUT) {
- RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
+ PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
req->req_type);
goto err_ret;
}
@@ -194,8 +194,7 @@ err_ret:
#define HWRM_CHECK_RESULT() do {\
if (rc) { \
- RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
- __func__, rc); \
+ PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
} \
@@ -204,18 +203,15 @@ err_ret:
if (resp->resp_len >= 16) { \
struct hwrm_err_output *tmp_hwrm_err_op = \
(void *)resp; \
- RTE_LOG(ERR, PMD, \
- "%s error %d:%d:%08x:%04x\n", \
- __func__, \
+ PMD_DRV_LOG(ERR, \
+ "error %d:%d:%08x:%04x\n", \
rc, tmp_hwrm_err_op->cmd_err, \
rte_le_to_cpu_32(\
tmp_hwrm_err_op->opaque_0), \
rte_le_to_cpu_16(\
tmp_hwrm_err_op->opaque_1)); \
- } \
- else { \
- RTE_LOG(ERR, PMD, \
- "%s error %d\n", __func__, rc); \
+ } else { \
+ PMD_DRV_LOG(ERR, "error %d\n", rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
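
A hedged sketch of the command pattern these reworked macros support; it mirrors bnxt_hwrm_func_qcfg() later in this patch rather than adding new driver code. HWRM_PREP() takes bp->hwrm_lock and fills the request header, HWRM_CHECK_RESULT() logs, unlocks and returns on failure, and HWRM_UNLOCK() drops the lock on the success path:

static int bnxt_hwrm_example(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_QCFG);                /* lock + header */
	req.fid = rte_cpu_to_le_16(0xffff);       /* 0xffff: this function */
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();                      /* unlock + return on error */
	/* ... read resp fields while the lock is still held ... */
	HWRM_UNLOCK();
	return rc;
}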
@@ -369,7 +365,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
conf->pool_map[j].pools & (1UL << j)) {
- RTE_LOG(DEBUG, PMD,
+ PMD_DRV_LOG(DEBUG,
"Add vlan %u to vmdq pool %u\n",
conf->pool_map[j].vlan_id, j);
@@ -427,12 +423,95 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
return rc;
}
+int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
+{
+ struct hwrm_port_mac_cfg_input req = {.req_type = 0};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t flags = 0;
+ int rc;
+
+ if (!ptp)
+ return 0;
+
+ HWRM_PREP(req, PORT_MAC_CFG);
+
+ if (ptp->rx_filter)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+ else
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+ if (ptp->tx_tstamp_en)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
+ else
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+ req.flags = rte_cpu_to_le_32(flags);
+ req.enables =
+ rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+ req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
+ struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+/* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
+ if (ptp)
+ return 0;
+
+ HWRM_PREP(req, PORT_MAC_PTP_QCFG);
+
+ req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS))
+ return 0;
+
+ ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
+ if (!ptp)
+ return -ENOMEM;
+
+ ptp->rx_regs[BNXT_PTP_RX_TS_L] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
+ ptp->rx_regs[BNXT_PTP_RX_TS_H] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
+ ptp->rx_regs[BNXT_PTP_RX_SEQ] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
+ ptp->rx_regs[BNXT_PTP_RX_FIFO] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
+ ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
+ ptp->tx_regs[BNXT_PTP_TX_TS_L] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
+ ptp->tx_regs[BNXT_PTP_TX_TS_H] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
+ ptp->tx_regs[BNXT_PTP_TX_SEQ] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
+ ptp->tx_regs[BNXT_PTP_TX_FIFO] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+
+ ptp->bp = bp;
+ bp->ptp_cfg = ptp;
+
+ return 0;
+}
+
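With direct access supported, the offsets captured in rx_regs[]/tx_regs[] let the driver assemble a 64-bit timestamp from two 32-bit BAR reads. A minimal sketch, assuming a hypothetical read_bar32() accessor (it also ignores the low/high rollover race a real driver must handle):

#include <stdint.h>

static uint64_t ptp_read_rx_ts(const uint32_t rx_regs[],
			       uint32_t (*read_bar32)(uint32_t off))
{
	uint64_t lo = read_bar32(rx_regs[0]);   /* BNXT_PTP_RX_TS_L */
	uint64_t hi = read_bar32(rx_regs[1]);   /* BNXT_PTP_RX_TS_H */

	return (hi << 32) | lo;
}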
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {.req_type = 0 };
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
uint16_t new_max_vfs;
+ uint32_t flags;
int i;
HWRM_PREP(req, FUNC_QCAPS);
@@ -444,6 +523,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
HWRM_CHECK_RESULT();
bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+ flags = rte_le_to_cpu_32(resp->flags);
if (BNXT_PF(bp)) {
bp->pf.port_id = resp->port_id;
bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
@@ -461,7 +541,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_table == NULL)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Fail to alloc VLAN table for VF %d\n",
i);
else
@@ -472,7 +552,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_as_table == NULL)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Alloc VLAN AS table for VF %d fail\n",
i);
else
@@ -500,8 +580,16 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->max_vnics = 1;
}
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
- if (BNXT_PF(bp))
+ if (BNXT_PF(bp)) {
bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
+ bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
+ PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
+ HWRM_UNLOCK();
+ bnxt_hwrm_ptp_qcfg(bp);
+ }
+ }
+
HWRM_UNLOCK();
return rc;
@@ -549,8 +637,13 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
sizeof(bp->pf.vf_req_fwd)));
}
- req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
- //memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
+ req.async_event_fwd[0] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
+ ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
+ ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+ req.async_event_fwd[1] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
+ ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -584,13 +677,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
HWRM_CHECK_RESULT();
- RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
+ PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
resp->hwrm_intf_upd,
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
- RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
+ PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
my_version = HWRM_VERSION_MAJOR << 16;
@@ -602,28 +695,28 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
fw_version |= resp->hwrm_intf_upd;
if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
- RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
+ PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
rc = -EINVAL;
goto error;
}
if (my_version != fw_version) {
- RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
+ PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
if (my_version < fw_version) {
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Firmware API version is newer than driver.\n");
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"The driver may be missing features.\n");
} else {
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Firmware API version is older than driver.\n");
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Not all driver features may be functional.\n");
}
}
if (bp->max_req_len > resp->max_req_win_len) {
- RTE_LOG(ERR, PMD, "Unsupported request length\n");
+ PMD_DRV_LOG(ERR, "Unsupported request length\n");
rc = -EINVAL;
}
bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
@@ -646,7 +739,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unable to map response buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@@ -658,7 +751,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
- RTE_LOG(DEBUG, PMD, "Short command supported\n");
+ PMD_DRV_LOG(DEBUG, "Short command supported\n");
rte_free(bp->hwrm_short_cmd_req_addr);
@@ -673,7 +766,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == 0) {
rte_free(bp->hwrm_short_cmd_req_addr);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unable to map buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@@ -722,7 +815,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
if (bp->link_info.auto_mode && conf->link_speed) {
req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
- RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
+ PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
}
req.flags = rte_cpu_to_le_32(conf->phy_flags);
@@ -738,7 +831,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
}
/* AutoNeg - Advertise speeds specified. */
- if (conf->auto_link_speed_mask) {
+ if (conf->auto_link_speed_mask &&
+ !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
req.auto_mode =
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
req.auto_link_speed_mask =
@@ -761,7 +855,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
} else {
req.flags =
rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
- RTE_LOG(INFO, PMD, "Force Link Down\n");
+ PMD_DRV_LOG(INFO, "Force Link Down\n");
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -801,12 +895,22 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
+ link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
link_info->phy_ver[0] = resp->phy_maj;
link_info->phy_ver[1] = resp->phy_min;
link_info->phy_ver[2] = resp->phy_bld;
HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
+ PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
+ PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
+ PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
+ PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
+ link_info->auto_link_speed_mask);
+ PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
+ link_info->force_link_speed);
+
return rc;
}
@@ -879,7 +983,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
break;
default:
- RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
+ PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
ring_type);
HWRM_UNLOCK();
return -1;
@@ -893,22 +997,22 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc rx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc tx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
default:
- RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
}
@@ -940,19 +1044,19 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
- RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
- RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
- RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
rc);
return rc;
default:
- RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
return rc;
}
}
@@ -1046,7 +1150,6 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
HWRM_UNLOCK();
- bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
return rc;
}
@@ -1077,7 +1180,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
/* map ring groups to this vnic */
- RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
+ PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
@@ -1097,7 +1200,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
HWRM_UNLOCK();
- RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}
@@ -1167,7 +1270,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct bnxt_plcmodes_cfg pmodes;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}
@@ -1232,7 +1335,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
return rc;
}
HWRM_PREP(req, VNIC_QCFG);
@@ -1284,7 +1387,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
HWRM_UNLOCK();
- RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
@@ -1297,7 +1400,7 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
bp->hwrm_cmd_resp_addr;
if (vnic->rss_rule == 0xffff) {
- RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
@@ -1321,7 +1424,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
return rc;
}
@@ -1569,19 +1672,15 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
- if (i >= bp->rx_cp_nr_rings)
+ if (i >= bp->rx_cp_nr_rings) {
cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
- else
+ } else {
cpr = bp->rx_queues[i]->cp_ring;
+ bp->grp_info[i].fw_stats_ctx = -1;
+ }
if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
- /*
- * TODO. Need a better way to reset grp_info.stats_ctx
- * for Rx rings only. stats_ctx is not saved for Tx
- * in grp_info.
- */
- bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
if (rc)
return rc;
}
@@ -1641,7 +1740,6 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
bnxt_hwrm_ring_free(bp, cp_ring,
HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
cp_ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
sizeof(*cpr->cp_desc_ring));
cpr->cp_raw_cons = 0;
@@ -1697,10 +1795,17 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
rxr->rx_ring_struct->ring_size *
sizeof(*rxr->rx_buf_ring));
rxr->rx_prod = 0;
+ }
+ ring = rxr->ag_ring_struct;
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
memset(rxr->ag_buf_ring, 0,
- rxr->ag_ring_struct->ring_size *
- sizeof(*rxr->ag_buf_ring));
+ rxr->ag_ring_struct->ring_size *
+ sizeof(*rxr->ag_buf_ring));
rxr->ag_prod = 0;
+ bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr, idx);
@@ -1761,7 +1866,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -1797,7 +1902,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
STAILQ_FOREACH(flow, &vnic->flow_list, next) {
filter = flow->filter;
- RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
+ PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
if (filter->filter_type == HWRM_CFA_EM_FILTER)
rc = bnxt_hwrm_clear_em_filter(bp, filter);
else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
@@ -1938,8 +2043,12 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
+ case ETH_LINK_SPEED_100G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
+ break;
default:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported link speed %d; default to AUTO\n",
conf_link_speed);
break;
@@ -1950,7 +2059,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
- ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
+ ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
{
@@ -1963,20 +2072,20 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
if (one_speed & (one_speed - 1)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Invalid advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported advertised speed (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
} else {
if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
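
The one_speed & (one_speed - 1) test above is the standard single-bit check: it is zero only when exactly one speed bit is set, which is what a fixed-speed request requires. A runnable worked example (values illustrative):

#include <stdint.h>
#include <stdio.h>

static int is_single_speed(uint32_t speeds)
{
	/* non-zero and a power of two <=> exactly one bit set */
	return speeds != 0 && (speeds & (speeds - 1)) == 0;
}

int main(void)
{
	printf("%d\n", is_single_speed(1u << 3));               /* 1: one speed */
	printf("%d\n", is_single_speed((1u << 3) | (1u << 5))); /* 0: two speeds */
	return 0;
}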
@@ -2014,6 +2123,8 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
if (link_speed & ETH_LINK_SPEED_50G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
+ if (link_speed & ETH_LINK_SPEED_100G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
return ret;
}
@@ -2046,9 +2157,12 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
eth_link_speed = ETH_SPEED_NUM_50G;
break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
+ eth_link_speed = ETH_SPEED_NUM_100G;
+ break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
- RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
+ PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
hw_link_speed);
break;
}
@@ -2068,7 +2182,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
eth_link_duplex = ETH_LINK_HALF_DUPLEX;
break;
default:
- RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
+ PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
hw_link_duplex);
break;
}
@@ -2082,7 +2196,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Get link config failed with rc %d\n", rc);
goto exit;
}
@@ -2107,7 +2221,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
struct bnxt_link_info link_req;
uint16_t speed, autoneg;
- if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
+ if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
return 0;
rc = bnxt_valid_link_speed(dev_conf->link_speeds,
@@ -2123,7 +2237,9 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
- if (autoneg == 1) {
+ /* Autoneg can be used only when the firmware allows it */
+ if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
+ bp->link_info.force_link_speed)) {
link_req.phy_flags |=
HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
link_req.auto_link_speed_mask =
@@ -2136,12 +2252,18 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
bp->link_info.media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
- RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
+ PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
return -EINVAL;
}
link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
- link_req.link_speed = speed;
+ /* If the user wants a particular speed, try that first. */
+ if (speed)
+ link_req.link_speed = speed;
+ else if (bp->link_info.force_link_speed)
+ link_req.link_speed = bp->link_info.force_link_speed;
+ else
+ link_req.link_speed = bp->link_info.auto_link_speed;
}
link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
link_req.auto_pause = bp->link_info.auto_pause;
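
The forced-speed selection above boils down to a three-level precedence: an explicit user speed wins, then the speed firmware already forced, then the firmware's autoneg speed. A minimal restatement of just that precedence (illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

static uint16_t pick_forced_speed(uint16_t user, uint16_t forced,
				  uint16_t auto_speed)
{
	if (user)
		return user;        /* explicit request wins */
	if (forced)
		return forced;      /* keep the speed FW already forced */
	return auto_speed;          /* fall back to FW's autoneg speed */
}

int main(void)
{
	printf("%u\n", pick_forced_speed(0, 25, 10));   /* prints 25 */
	return 0;
}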
@@ -2150,7 +2272,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
port_phy_cfg:
rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Set link config failed with rc %d\n", rc);
}
@@ -2163,6 +2285,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t flags;
int rc = 0;
HWRM_PREP(req, FUNC_QCFG);
@@ -2174,6 +2297,9 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
/* Hard Coded.. 0xfff VLAN ID mask */
bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
+ flags = rte_le_to_cpu_16(resp->flags);
+ if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
+ bp->flags |= BNXT_FLAG_MULTI_HOST;
switch (resp->port_partition_type) {
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
@@ -2323,11 +2449,11 @@ static void reserve_resources_from_vf(struct bnxt *bp,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
- RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
}
@@ -2358,11 +2484,11 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
- RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_16(resp->vlan);
@@ -2398,7 +2524,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
int rc;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}
@@ -2425,7 +2551,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
size_t req_buf_sz;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}
@@ -2491,9 +2617,9 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
if (rc || resp->error_code) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to initizlie VF %d\n", i);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not all VFs available. (%d, %d)\n",
rc, resp->error_code);
HWRM_UNLOCK();
@@ -2643,7 +2769,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
req.req_buf_page_addr[0] =
rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr[0] == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
return -ENOMEM;
}
@@ -3065,7 +3191,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3101,7 +3227,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3162,7 +3288,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3219,19 +3345,19 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
if (req.vnic_id_tbl_addr == 0) {
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map VNIC ID table address to physical memory\n");
return -ENOMEM;
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
@@ -3362,7 +3488,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
}
}
/* Could not find a default VNIC. */
- RTE_LOG(ERR, PMD, "No default VNIC\n");
+ PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
rte_free(vnic_ids);
return -1;
@@ -3452,7 +3578,7 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
if (filter->fw_em_filter_id == UINT64_MAX)
return 0;
- RTE_LOG(ERR, PMD, "Clear EM filter\n");
+ PMD_DRV_LOG(ERR, "Clear EM filter\n");
HWRM_PREP(req, CFA_EM_FLOW_FREE);
req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
@@ -3575,7 +3701,34 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
HWRM_UNLOCK();
filter->fw_ntuple_filter_id = -1;
- filter->fw_l2_filter_id = -1;
return 0;
}
+
+int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ unsigned int rss_idx, fw_idx, i;
+
+ if (vnic->rss_table && vnic->hash_type) {
+ /*
+ * Fill the RSS hash & redirection table with
+ * ring group ids for all VNICs
+ */
+ for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+ rss_idx++, fw_idx++) {
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ fw_idx %= bp->rx_cp_nr_rings;
+ if (vnic->fw_grp_ids[fw_idx] !=
+ INVALID_HW_RING_ID)
+ break;
+ fw_idx++;
+ }
+ if (i == bp->rx_cp_nr_rings)
+ return 0;
+ vnic->rss_table[rss_idx] =
+ vnic->fw_grp_ids[fw_idx];
+ }
+ return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ return 0;
+}
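
The table fill above walks the ring groups round-robin and skips entries marked INVALID_HW_RING_ID (stopped queues), so RSS only spreads across started queues. A runnable worked example with a shrunken table (HW_HASH_INDEX_SIZE stands at 128 in the real driver; values here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define INVALID_ID 0xffff
#define TBL_SIZE   8    /* stand-in for HW_HASH_INDEX_SIZE */

int main(void)
{
	uint16_t grp[4] = { 10, INVALID_ID, 12, 13 };  /* queue 1 stopped */
	uint16_t tbl[TBL_SIZE];
	unsigned int rss_idx, fw_idx = 0, i, n = 4;

	for (rss_idx = 0; rss_idx < TBL_SIZE; rss_idx++, fw_idx++) {
		for (i = 0; i < n; i++) {
			fw_idx %= n;
			if (grp[fw_idx] != INVALID_ID)
				break;          /* found a live ring group */
			fw_idx++;
		}
		if (i == n)
			return 0;               /* no started queue left */
		tbl[rss_idx] = grp[fw_idx];
	}
	for (rss_idx = 0; rss_idx < TBL_SIZE; rss_idx++)
		printf("%u ", tbl[rss_idx]);    /* 10 12 13 10 12 13 10 12 */
	putchar('\n');
	return 0;
}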
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 85083e61..f11e72a3 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -42,6 +42,17 @@ struct bnxt_filter_info;
struct bnxt_cp_ring_info;
#define HWRM_SEQ_ID_INVALID -1U
+/* Convert a bit-field location into a mask value */
+#define ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED)
+#define ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD - 32))
+#define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE - 32))
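
These macros split the async event bitmap across two 32-bit words: event IDs 0-31 land in async_event_fwd[0], IDs 32-63 in async_event_fwd[1] after subtracting 32, matching the registration hunk in bnxt_hwrm.c above. A runnable worked example (the ID values below are illustrative, not the firmware's real ones):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int link_status = 0;          /* ID < 32  -> word 0 */
	unsigned int pf_drvr_unload = 32 + 1;  /* ID >= 32 -> word 1 */
	uint32_t fwd[2] = { 0, 0 };

	fwd[0] |= 1u << link_status;
	fwd[1] |= 1u << (pf_drvr_unload - 32);
	printf("fwd[0]=0x%08x fwd[1]=0x%08x\n", fwd[0], fwd[1]);
	return 0;
}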
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
@@ -175,4 +186,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
uint16_t dir_ordinal, uint16_t dir_ext,
uint16_t dir_attr, const uint8_t *data,
size_t data_len);
+int bnxt_hwrm_ptp_cfg(struct bnxt *bp);
+int bnxt_vnic_rss_configure(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
#endif
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 49436cfd..8ab98693 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -84,7 +84,7 @@ static void bnxt_int_handler(void *param)
cpr->cp_ring_struct))
goto no_more;
}
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Ignoring %02x completion\n", CMP_TYPE(cmp));
break;
}
@@ -154,7 +154,7 @@ int bnxt_setup_int(struct bnxt *bp)
return 0;
setup_exit:
- RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n");
+ PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
return rc;
}
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 0fa2f0c0..8fb89721 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -63,13 +63,15 @@ void bnxt_free_ring(struct bnxt_ring *ring)
* Ring groups
*/
-void bnxt_init_ring_grps(struct bnxt *bp)
+int bnxt_init_ring_grps(struct bnxt *bp)
{
unsigned int i;
for (i = 0; i < bp->max_ring_grps; i++)
memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
sizeof(struct bnxt_ring_grp_info));
+
+ return 0;
}
/*
@@ -174,15 +176,15 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
for (sz = 0; sz < total_alloc_len; sz += getpagesize())
rte_mem_lock_page(((char *)mz->addr) + sz);
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map ring address to physical memory\n");
return -ENOMEM;
}
@@ -324,7 +326,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
ring = rxr->ag_ring_struct;
/* Agg ring */
if (ring == NULL) {
- RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n");
+ PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
goto err_out;
}
@@ -334,7 +336,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
cp_ring->fw_ring_id);
if (rc)
goto err_out;
- RTE_LOG(DEBUG, PMD, "Alloc AGG Done!\n");
+ PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
rxr->ag_prod = 0;
rxr->ag_doorbell =
(char *)pci_dev->mem_resource[2].addr +
@@ -345,7 +347,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bnxt_init_one_rx_ring(rxq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n");
+ PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
return -ENOMEM;
}
@@ -362,9 +364,6 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
struct bnxt_ring *ring = txr->tx_ring_struct;
unsigned int idx = i + 1 + bp->rx_cp_nr_rings;
- /* Account for AGG Rings. AGG ring cnt = Rx Cmpl ring cnt */
- idx += bp->rx_cp_nr_rings;
-
/* Tx cmpl */
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index 164f482e..ebf7228e 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -40,9 +40,6 @@
#define RING_NEXT(ring, idx) (((idx) + 1) & (ring)->ring_mask)
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->buf_iova + (mb)->data_off))
-
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
#define DB_IRQ_DIS (0x1 << 27)
@@ -94,7 +91,7 @@ struct bnxt_tx_ring_info;
struct bnxt_rx_ring_info;
struct bnxt_cp_ring_info;
void bnxt_free_ring(struct bnxt_ring *ring);
-void bnxt_init_ring_grps(struct bnxt *bp);
+int bnxt_init_ring_grps(struct bnxt *bp);
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
struct bnxt_tx_ring_info *tx_ring_info,
struct bnxt_rx_ring_info *rx_ring_info,
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index c4da474e..d49f3546 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -75,7 +75,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
if (bp->rx_cp_nr_rings < 2) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -92,7 +92,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->end_grp_id = vnic->start_grp_id;
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -118,10 +118,10 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
pools = max_pools;
break;
case ETH_MQ_RX_RSS:
- pools = bp->rx_cp_nr_rings;
+ pools = 1;
break;
default:
- RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n",
+ PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
dev_conf->rxmode.mq_mode);
rc = -EINVAL;
goto err_out;
@@ -135,7 +135,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
for (i = 0; i < pools; i++) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -166,7 +166,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -311,8 +311,15 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
struct bnxt_rx_queue *rxq;
int rc = 0;
+ if (queue_idx >= bp->max_rx_rings) {
+ PMD_DRV_LOG(ERR,
+ "Cannot create Rx ring %d. Only %d rings available\n",
+ queue_idx, bp->max_rx_rings);
+ return -ENOSPC;
+ }
+
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -325,7 +332,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
+ PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
rc = -ENOMEM;
goto out;
}
@@ -334,8 +341,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
- RTE_LOG(DEBUG, PMD, "RX Buf size is %d\n", rxq->rx_buf_use_size);
- RTE_LOG(DEBUG, PMD, "RX Buf MTU %d\n", eth_dev->data->mtu);
+ PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
+ PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
@@ -350,7 +357,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate RX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
"rxr")) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;
@@ -398,3 +405,56 @@ bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
}
return rc;
}
+
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_deferred_start = false;
+ PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
+ return 0;
+ PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+ vnic->fw_grp_ids[rx_queue_id] =
+ bp->grp_info[rx_queue_id + 1].fw_grp_id;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+
+ return 0;
+}
+
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+ return 0;
+}
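
Once these callbacks are wired into dev_ops, an application can pause a single Rx queue through the public ethdev API: bnxt_recv_pkts() returns 0 for a deferred queue and the RSS table stops pointing at its ring group until it restarts. A minimal usage sketch (port/queue numbers are examples):

#include <rte_ethdev.h>

static int toggle_rx_queue(uint16_t port_id, uint16_t qid)
{
	int rc = rte_eth_dev_rx_queue_stop(port_id, qid);

	if (rc)
		return rc;
	/* queue is now deferred; traffic rebalances to the others */
	return rte_eth_dev_rx_queue_start(port_id, qid);
}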
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 508731ee..c7acaa75 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -50,6 +50,7 @@ struct bnxt_rx_queue {
uint16_t reg_idx; /* RX queue register index */
uint16_t port_id; /* Device port identifier */
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+ uint8_t rx_deferred_start; /* not in global dev start */
struct bnxt *bp;
int index;
@@ -59,8 +60,6 @@ struct bnxt_rx_queue {
uint32_t rx_buf_use_size; /* useable size */
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
-
- struct bnxt_tpa_info *rx_tpa;
};
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
@@ -77,5 +76,8 @@ int bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev,
uint16_t queue_id);
int bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev,
uint16_t queue_id);
-
+int bnxt_rx_queue_start(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 30891b74..aae9a635 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -65,17 +65,17 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
{
struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
- struct rte_mbuf *data;
+ struct rte_mbuf *mbuf;
- data = __bnxt_alloc_rx_data(rxq->mb_pool);
- if (!data) {
+ mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!mbuf) {
rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
return -ENOMEM;
}
- rx_buf->mbuf = data;
+ rx_buf->mbuf = mbuf;
- rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
+ rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
return 0;
}
@@ -86,23 +86,23 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
{
struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
- struct rte_mbuf *data;
+ struct rte_mbuf *mbuf;
- data = __bnxt_alloc_rx_data(rxq->mb_pool);
- if (!data) {
+ mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!mbuf) {
rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
return -ENOMEM;
}
if (rxbd == NULL)
- RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
if (rx_buf == NULL)
- RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
- rx_buf->mbuf = data;
+ rx_buf->mbuf = mbuf;
- rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
+ rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
return 0;
}
@@ -123,7 +123,7 @@ static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
prod_bd = &rxr->rx_desc_ring[prod];
- prod_bd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(mbuf));
+ prod_bd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxr->rx_prod = prod;
}
@@ -234,7 +234,7 @@ static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
/* TODO batch allocation for better performance */
while (rte_bitmap_get(rxr->ag_bitmap, next)) {
if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"agg mbuf alloc failed: prod=0x%x\n", next);
break;
}
@@ -338,41 +338,57 @@ static inline struct rte_mbuf *bnxt_tpa_end(
static uint32_t
bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
{
- uint32_t pkt_type = 0;
- uint32_t t_ipcs = 0, ip = 0, ip6 = 0;
- uint32_t tcp = 0, udp = 0, icmp = 0;
- uint32_t vlan = 0;
+ uint32_t l3, pkt_type = 0;
+ uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
+ uint32_t flags_type;
vlan = !!(rxcmp1->flags2 &
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
+ pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;
+
t_ipcs = !!(rxcmp1->flags2 &
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
ip6 = !!(rxcmp1->flags2 &
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));
- icmp = !!(rxcmp->flags_type &
- rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_ICMP));
- tcp = !!(rxcmp->flags_type &
- rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_TCP));
- udp = !!(rxcmp->flags_type &
- rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_UDP));
- ip = !!(rxcmp->flags_type &
- rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_IP));
-
- pkt_type |= ((ip || tcp || udp || icmp) && !t_ipcs && !ip6) ?
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0;
- pkt_type |= ((ip || tcp || udp || icmp) && !t_ipcs && ip6) ?
- RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0;
- pkt_type |= (!t_ipcs && icmp) ? RTE_PTYPE_L4_ICMP : 0;
- pkt_type |= (!t_ipcs && udp) ? RTE_PTYPE_L4_UDP : 0;
- pkt_type |= (!t_ipcs && tcp) ? RTE_PTYPE_L4_TCP : 0;
- pkt_type |= ((ip || tcp || udp || icmp) && t_ipcs && !ip6) ?
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN : 0;
- pkt_type |= ((ip || tcp || udp || icmp) && t_ipcs && ip6) ?
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN : 0;
- pkt_type |= (t_ipcs && icmp) ? RTE_PTYPE_INNER_L4_ICMP : 0;
- pkt_type |= (t_ipcs && udp) ? RTE_PTYPE_INNER_L4_UDP : 0;
- pkt_type |= (t_ipcs && tcp) ? RTE_PTYPE_INNER_L4_TCP : 0;
- pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : 0;
+
+ flags_type = rxcmp->flags_type &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
+
+ if (!t_ipcs && !ip6)
+ l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (!t_ipcs && ip6)
+ l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ else if (t_ipcs && !ip6)
+ l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ else
+ l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+
+ switch (flags_type) {
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_TCP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_UDP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
+ pkt_type |= l3;
+ break;
+ }
return pkt_type;
}
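
With the completion itype now driving a single switch, mbuf->packet_type carries a full L2/L3/L4 classification. A minimal consumer-side sketch, assuming only the standard RTE_PTYPE masks; it branches on outer vs. tunnel-inner UDP without reparsing headers:

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

static int is_udp_pkt(const struct rte_mbuf *m)
{
	uint32_t pt = m->packet_type;

	return (pt & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
	       (pt & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP;
}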
@@ -442,6 +458,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
rte_prefetch0(mbuf);
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->nb_segs = 1;
mbuf->next = NULL;
mbuf->pkt_len = rxcmp->len;
@@ -456,6 +473,10 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
+ if ((rxcmp->flags_type & rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_MASK)) ==
+ RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)
+ mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+
if (agg_buf)
bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);
@@ -470,12 +491,12 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
else
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_NONE;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
else
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
@@ -507,7 +528,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
*/
prod = RING_NEXT(rxr->rx_ring_struct, prod);
if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
- RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
+ PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
rc = -ENOMEM;
goto rx;
}
@@ -540,6 +561,10 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t ag_prod = rxr->ag_prod;
int rc = 0;
+ /* If the Rx queue was stopped, return */
+ if (rxq->rx_deferred_start)
+ return 0;
+
/* Handle RX burst request */
while (1) {
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
@@ -596,7 +621,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxr->rx_prod = i;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
} else {
- RTE_LOG(ERR, PMD, "Alloc mbuf failed\n");
+ PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
break;
}
}
@@ -739,7 +764,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
prod = rxr->rx_prod;
for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"init'ed rx ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@@ -747,7 +772,6 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->rx_prod = prod;
prod = RING_NEXT(rxr->rx_ring_struct, prod);
}
- RTE_LOG(DEBUG, PMD, "%s\n", __func__);
ring = rxr->ag_ring_struct;
type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
@@ -756,7 +780,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"init'ed AG ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@@ -764,7 +788,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->ag_prod = prod;
prod = RING_NEXT(rxr->ag_ring_struct, prod);
}
- RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);
+ PMD_DRV_LOG(DEBUG, "AGG Done!\n");
if (rxr->tpa_info) {
for (i = 0; i < BNXT_TPA_MAX; i++) {
@@ -776,7 +800,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
}
}
}
- RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);
+ PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");
return 0;
}
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index a94373d1..f3ed49bd 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -120,5 +120,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
-
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
#endif
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index fe83d370..bd93cc83 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -236,6 +236,10 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
struct bnxt *bp = eth_dev->data->dev_private;
memset(bnxt_stats, 0, sizeof(*bnxt_stats));
+ if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
+ return 0;
+ }
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
@@ -267,6 +271,11 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
+ return;
+ }
+
bnxt_clear_all_hwrm_stat_ctxs(bp);
rte_atomic64_clear(&bp->rx_mbuf_alloc_fail);
}
@@ -280,7 +289,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
uint64_t tx_drop_pkts;
if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
- RTE_LOG(ERR, PMD, "xstats not supported for VF\n");
+ PMD_DRV_LOG(ERR, "xstats not supported for VF\n");
return 0;
}
@@ -358,15 +367,15 @@ void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
- if (bp->flags & BNXT_FLAG_PORT_STATS && !BNXT_NPAR_PF(bp))
+ if (bp->flags & BNXT_FLAG_PORT_STATS && BNXT_SINGLE_PF(bp))
bnxt_hwrm_port_clr_stats(bp);
if (BNXT_VF(bp))
- RTE_LOG(ERR, PMD, "Operation not supported on a VF device\n");
- if (BNXT_NPAR_PF(bp))
- RTE_LOG(ERR, PMD, "Operation not supported on a MF device\n");
+ PMD_DRV_LOG(ERR, "Operation not supported on a VF device\n");
+ if (!BNXT_SINGLE_PF(bp))
+ PMD_DRV_LOG(ERR, "Operation not supported on a MF device\n");
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
- RTE_LOG(ERR, PMD, "Operation not supported\n");
+ PMD_DRV_LOG(ERR, "Operation not supported\n");
}
int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
@@ -385,7 +394,7 @@ int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
bnxt_dev_xstats_get_by_id_op(dev, NULL, values_copy, stat_cnt);
for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- RTE_LOG(ERR, PMD, "id value isn't valid");
+ PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
values[i] = values_copy[ids[i]];
@@ -411,7 +420,7 @@ int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,
for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- RTE_LOG(ERR, PMD, "id value isn't valid");
+ PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
strcpy(xstats_names[i].name,
diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h
index 51d16f5d..c1c83d57 100644
--- a/drivers/net/bnxt/bnxt_stats.h
+++ b/drivers/net/bnxt/bnxt_stats.h
@@ -34,7 +34,7 @@
#ifndef _BNXT_STATS_H_
#define _BNXT_STATS_H_
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
void bnxt_free_stats(struct bnxt *bp);
int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index 99dddddf..53524346 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -108,8 +108,15 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
struct bnxt_tx_queue *txq;
int rc = 0;
+ if (queue_idx >= bp->max_tx_rings) {
+ PMD_DRV_LOG(ERR,
+ "Cannot create Tx ring %d. Only %d rings available\n",
+ queue_idx, bp->max_tx_rings);
+ return -ENOSPC;
+ }
+
if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -124,7 +131,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!txq) {
- RTE_LOG(ERR, PMD, "bnxt_tx_queue allocation failed!");
+ PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
rc = -ENOMEM;
goto out;
}
@@ -142,14 +149,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate TX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring,
"txr")) {
- RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for tx_ring failed!");
+ PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;
}
if (bnxt_init_one_tx_ring(txq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_tx_ring failed!");
+ PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index f753c10f..e27c34fa 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -71,5 +71,4 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-
#endif
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 8ca4bbd8..2c81a37c 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -101,7 +101,7 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
if (ring == NULL)
return -ENOMEM;
txr->tx_ring_struct = ring;
- ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
+ ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)txr->tx_desc_ring;
ring->bd_dma = txr->tx_desc_mapping;
@@ -181,7 +181,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
else
txbd->flags_type |= lhint_arr[txbd->len >> 9];
- txbd->addr = rte_cpu_to_le_32(RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf));
+ txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(tx_buf->mbuf));
if (long_bd) {
txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
@@ -217,23 +217,28 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
tx_pkt->outer_l3_len;
txbd1->mss = tx_pkt->tso_segsz;
- } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
+ PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
/* Outer IP, Inner IP, Inner TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
+ PKT_TX_IIP_TCP_UDP_CKSUM) {
/* (Inner) IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
+ PKT_TX_OIP_TCP_UDP_CKSUM) {
/* Outer IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
+ PKT_TX_OIP_IIP_CKSUM) {
/* Outer IP, Inner IP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
+ PKT_TX_TCP_UDP_CKSUM) {
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
@@ -257,7 +262,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
txbd = &txr->tx_desc_ring[txr->tx_prod];
- txbd->addr = rte_cpu_to_le_32(RTE_MBUF_DATA_DMA_ADDR(m_seg));
+ txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(m_seg));
txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
txbd->len = m_seg->data_len;
@@ -344,6 +349,11 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Handle TX completions */
bnxt_handle_tx_cp(txq);
+ /* Tx queue was stopped; wait for it to be restarted */
+ if (txq->tx_deferred_start) {
+ PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
+ return 0;
+ }
/* Handle TX burst request */
for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) {
@@ -359,3 +369,30 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx_pkts;
}
+
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ txq->tx_deferred_start = false;
+ PMD_DRV_LOG(DEBUG, "Tx queue started\n");
+
+ return 0;
+}
+
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ /* Handle TX completions */
+ bnxt_handle_tx_cp(txq);
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ txq->tx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");
+
+ return 0;
+}
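The checksum hunks in bnxt_start_xmit() replace `if (flags & MASK)` tests with `if ((flags & MASK) == MASK)`. The masks here are composites of several PKT_TX_* bits, so the old test fired when any single bit was set and could select the wrong hardware checksum mode. A standalone illustration with generic flag names (not the bnxt definitions):

#include <stdint.h>
#include <stdio.h>

#define FLAG_A	0x1u
#define FLAG_B	0x2u
#define MASK_AB	(FLAG_A | FLAG_B)	/* composite mask */

int main(void)
{
	uint32_t flags = FLAG_A;	/* only one of the two bits set */

	if (flags & MASK_AB)		/* buggy: true with a single bit */
		printf("any-bit test matches\n");

	if ((flags & MASK_AB) == MASK_AB)	/* correct: all bits needed */
		printf("all-bits test matches\n");
	else
		printf("all-bits test correctly rejects\n");
	return 0;
}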
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 2feac51d..d88b15ab 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -68,6 +68,8 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 5bac2605..d4aeb4ca 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -107,7 +107,7 @@ int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
}
temp = STAILQ_NEXT(temp, next);
}
- RTE_LOG(ERR, PMD, "VNIC %p is not found in pool[%d]\n", vnic, pool);
+ PMD_DRV_LOG(ERR, "VNIC %p is not found in pool[%d]\n", vnic, pool);
return -EINVAL;
}
@@ -118,7 +118,7 @@ struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
/* Find the 1st unused vnic from the free_vnic_list pool*/
vnic = STAILQ_FIRST(&bp->free_vnic_list);
if (!vnic) {
- RTE_LOG(ERR, PMD, "No more free VNIC resources\n");
+ PMD_DRV_LOG(ERR, "No more free VNIC resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
@@ -194,13 +194,13 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map vnic address to physical memory\n");
return -ENOMEM;
}
@@ -241,7 +241,7 @@ void bnxt_free_vnic_mem(struct bnxt *bp)
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
- RTE_LOG(ERR, PMD, "VNIC is not freed yet!\n");
+ PMD_DRV_LOG(ERR, "VNIC is not freed yet!\n");
/* TODO Call HWRM to free VNIC */
}
}
@@ -260,7 +260,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp)
vnic_mem = rte_zmalloc("bnxt_vnic_info",
max_vnics * sizeof(struct bnxt_vnic_info), 0);
if (vnic_mem == NULL) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for %d VNICs",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d VNICs",
max_vnics);
return -ENOMEM;
}
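bnxt_alloc_vnic_attributes() keeps a fallback for the case where mz->iova merely echoes the virtual address, a sign that no real bus mapping was resolved, and re-derives the address with rte_mem_virt2iova(). The pattern in isolation, as a hedged sketch rather than the exact driver code:

#include <stdint.h>
#include <rte_memory.h>
#include <rte_memzone.h>

/* Returns a usable IOVA for a reserved memzone, or 0 on failure. */
static rte_iova_t memzone_iova(const struct rte_memzone *mz)
{
	rte_iova_t iova = mz->iova;

	/* virtual == "physical" usually means no mapping was found */
	if ((uintptr_t)mz->addr == iova) {
		iova = rte_mem_virt2iova(mz->addr);
		if (iova == RTE_BAD_IOVA)
			return 0;
	}
	return iova;
}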
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 875dc3c1..d8d35c7d 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -93,5 +93,4 @@ void bnxt_free_vnic_attributes(struct bnxt *bp);
int bnxt_alloc_vnic_attributes(struct bnxt *bp);
void bnxt_free_vnic_mem(struct bnxt *bp);
int bnxt_alloc_vnic_mem(struct bnxt *bp);
-
#endif
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index c16edbad..1e9c39f4 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -85,6 +85,7 @@
#define HWRM_PORT_CLR_STATS (UINT32_C(0x25))
#define HWRM_PORT_PHY_QCFG (UINT32_C(0x27))
#define HWRM_PORT_MAC_QCFG (UINT32_C(0x28))
+#define HWRM_PORT_MAC_PTP_QCFG (UINT32_C(0x29))
#define HWRM_PORT_PHY_QCAPS (UINT32_C(0x2a))
#define HWRM_PORT_LED_CFG (UINT32_C(0x2d))
#define HWRM_PORT_LED_QCFG (UINT32_C(0x2e))
@@ -909,7 +910,7 @@ struct rx_pkt_cmpl {
* This is the length of the data for the packet stored in the
* buffer(s) identified by the opaque value. This includes the
* packet BD and any associated buffer BDs. This does not
- * include the the length of any data places in aggregation BDs.
+	 * include the length of any data placed in aggregation BDs.
*/
uint32_t opaque;
/*
@@ -3275,7 +3276,7 @@ struct hwrm_func_cfg_input {
uint16_t fid;
/*
* Function ID of the function that is being configured. If set
- * to 0xFF... (All Fs), then the the configuration is for the
+ * to 0xFF... (All Fs), then the configuration is for the
* requesting function.
*/
uint8_t unused_0;
@@ -7121,6 +7122,227 @@ struct hwrm_queue_qportcfg_output {
*/
} __attribute__((packed));
+/*********************
+ * hwrm_port_mac_cfg *
+ *********************/
+
+
+/* hwrm_port_mac_cfg_input (size:320b/40B) */
+struct hwrm_port_mac_cfg_input {
+ uint16_t req_type;
+ uint16_t cmpl_ring;
+ uint16_t seq_id;
+ uint16_t target_id;
+ uint64_t resp_addr;
+ uint32_t flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ uint32_t enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ uint16_t port_id;
+ uint8_t ipg;
+ uint8_t lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ uint8_t vlan_pri2cos_map_pri;
+ uint8_t reserved1;
+ uint8_t tunnel_pri2cos_map_pri;
+ uint8_t dscp2pri_map_pri;
+ uint16_t rx_ts_capture_ptp_msg_type;
+ uint16_t tx_ts_capture_ptp_msg_type;
+ uint8_t cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
+ (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
+ (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
+ (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
+ (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
+ PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
+ (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
+ (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
+ (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
+ (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
+ PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ uint8_t unused_0[3];
+};
+
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+ uint16_t error_code;
+ uint16_t req_type;
+ uint16_t seq_id;
+ uint16_t resp_len;
+ uint16_t mru;
+ uint16_t mtu;
+ uint8_t ipg;
+ uint8_t lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ uint8_t unused_0;
+ uint8_t valid;
+};
+
+
+/**********************
+ * hwrm_port_mac_qcfg *
+ **********************/
+
+
+/* hwrm_port_mac_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_qcfg_input {
+ uint16_t req_type;
+ uint16_t cmpl_ring;
+ uint16_t seq_id;
+ uint16_t target_id;
+ uint64_t resp_addr;
+ uint16_t port_id;
+ uint8_t unused_0[6];
+};
+
+
+/* hwrm_port_mac_qcfg_output (size:192b/24B) */
+struct hwrm_port_mac_qcfg_output {
+ uint16_t error_code;
+ uint16_t req_type;
+ uint16_t seq_id;
+ uint16_t resp_len;
+ uint16_t mru;
+ uint16_t mtu;
+ uint8_t ipg;
+ uint8_t lpbk;
+ #define PORT_MAC_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_QCFG_RESP_LPBK_LAST PORT_MAC_QCFG_RESP_LPBK_REMOTE
+ uint8_t vlan_pri2cos_map_pri;
+ uint8_t flags;
+ #define PORT_MAC_QCFG_RESP_FLAGS_VLAN_PRI2COS_ENABLE 0x1UL
+ #define PORT_MAC_QCFG_RESP_FLAGS_TUNNEL_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_QCFG_RESP_FLAGS_IP_DSCP2COS_ENABLE 0x4UL
+ #define PORT_MAC_QCFG_RESP_FLAGS_OOB_WOL_ENABLE 0x8UL
+ #define PORT_MAC_QCFG_RESP_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_QCFG_RESP_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x20UL
+ uint8_t tunnel_pri2cos_map_pri;
+ uint8_t dscp2pri_map_pri;
+ uint16_t rx_ts_capture_ptp_msg_type;
+ uint16_t tx_ts_capture_ptp_msg_type;
+ uint8_t cos_field_cfg;
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_RSVD 0x1UL
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
+ (0x0UL << 1)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
+ (0x1UL << 1)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
+ (0x2UL << 1)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
+ (0x3UL << 1)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
+ PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
+ (0x0UL << 3)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
+ (0x1UL << 3)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
+ (0x2UL << 3)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
+ (0x3UL << 3)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
+ PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ uint8_t valid;
+};
+
+
+/**************************
+ * hwrm_port_mac_ptp_qcfg *
+ **************************/
+
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ uint16_t req_type;
+ uint16_t cmpl_ring;
+ uint16_t seq_id;
+ uint16_t target_id;
+ uint64_t resp_addr;
+ uint16_t port_id;
+ uint8_t unused_0[6];
+};
+
+
+/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ uint16_t error_code;
+ uint16_t req_type;
+ uint16_t seq_id;
+ uint16_t resp_len;
+ uint8_t flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+ uint8_t unused_0[3];
+ uint32_t rx_ts_reg_off_lower;
+ uint32_t rx_ts_reg_off_upper;
+ uint32_t rx_ts_reg_off_seq_id;
+ uint32_t rx_ts_reg_off_src_id_0;
+ uint32_t rx_ts_reg_off_src_id_1;
+ uint32_t rx_ts_reg_off_src_id_2;
+ uint32_t rx_ts_reg_off_domain_id;
+ uint32_t rx_ts_reg_off_fifo;
+ uint32_t rx_ts_reg_off_fifo_adv;
+ uint32_t rx_ts_reg_off_granularity;
+ uint32_t tx_ts_reg_off_lower;
+ uint32_t tx_ts_reg_off_upper;
+ uint32_t tx_ts_reg_off_seq_id;
+ uint32_t tx_ts_reg_off_fifo;
+ uint32_t tx_ts_reg_off_granularity;
+ uint8_t unused_1[7];
+ uint8_t valid;
+};
+
+
/* hwrm_vnic_alloc */
/*
* Description: This VNIC is a resource in the RX side of the chip that is used
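Every HWRM request above shares the same 16-byte header (req_type, cmpl_ring, seq_id, target_id, resp_addr) followed by command-specific fields. A hedged sketch of issuing the new HWRM_PORT_MAC_PTP_QCFG command; bnxt_hwrm_send() is a hypothetical stand-in for the driver's real request/response plumbing, which is outside this header:

#include <stdint.h>
#include <rte_byteorder.h>

extern int bnxt_hwrm_send(void *bp, void *req, int req_len,
			  void *resp, int resp_len);	/* hypothetical */

/* Returns 1 if PTP timestamps are directly readable, 0 if not, -1 on error. */
static int query_ptp_access(void *bp, uint16_t port_id)
{
	struct hwrm_port_mac_ptp_qcfg_input req = { 0 };
	struct hwrm_port_mac_ptp_qcfg_output resp = { 0 };

	req.req_type = rte_cpu_to_le_16(HWRM_PORT_MAC_PTP_QCFG);
	req.port_id = rte_cpu_to_le_16(port_id);

	if (bnxt_hwrm_send(bp, &req, sizeof(req), &resp, sizeof(resp)))
		return -1;

	/* DIRECT_ACCESS: timestamps come from the rx/tx_ts_reg_off_* regs */
	return !!(resp.flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS);
}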
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index a3134074..cae95f8f 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -36,7 +36,7 @@
#include <unistd.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_byteorder.h>
@@ -57,7 +57,7 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
ret_param.msg = msg;
_rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX,
- NULL, &ret_param);
+ &ret_param);
/* Default to approve */
if (ret_param.retval == RTE_PMD_BNXT_MB_EVENT_PROCEED)
@@ -85,7 +85,7 @@ int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set Tx loopback on non-PF port %d!\n",
port);
return -ENOTSUP;
@@ -127,7 +127,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set all queues drop on non-PF port!\n");
return -ENOTSUP;
}
@@ -140,7 +140,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp->vnic_info[i].bd_stall = !on;
rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to update PF VNIC %d.\n", i);
+ PMD_DRV_LOG(ERR, "Failed to update PF VNIC %d.\n", i);
return rc;
}
}
@@ -151,7 +151,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", i);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", i);
break;
}
}
@@ -180,7 +180,7 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d mac address on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -224,7 +224,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
/* Requested BW can't be greater than link speed */
if (tot_rate > eth_dev->data->dev_link.link_speed) {
- RTE_LOG(ERR, PMD, "Rate > Link speed. Set to %d\n", tot_rate);
+ PMD_DRV_LOG(ERR, "Rate > Link speed. Set to %d\n", tot_rate);
return -EINVAL;
}
@@ -262,7 +262,7 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set mac spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -314,7 +314,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VLAN spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -333,7 +333,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
rc = -1;
}
} else {
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
}
return rc;
@@ -367,7 +367,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d stripq on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -377,7 +377,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc)
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
return rc;
}
@@ -407,7 +407,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
return -EINVAL;
if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
- RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
+ PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
return -ENOTSUP;
}
@@ -430,7 +430,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
&bp->pf.vf_info[vf].l2_rx_mask,
bnxt_set_rx_mask_no_vlan);
if (rc)
- RTE_LOG(ERR, PMD, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
+ PMD_DRV_LOG(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
return rc;
}
@@ -442,7 +442,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
struct bnxt_vnic_info vnic;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VLAN table on non-PF port!\n");
return -EINVAL;
}
@@ -455,7 +455,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
/* This simply indicates there's no driver loaded.
* This is not an error.
*/
- RTE_LOG(ERR, PMD, "Unable to get default VNIC for VF %d\n", vf);
+ PMD_DRV_LOG(ERR, "Unable to get default VNIC for VF %d\n", vf);
} else {
memset(&vnic, 0, sizeof(vnic));
vnic.fw_vnic_id = dflt_vnic;
@@ -518,9 +518,9 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
/* Now check that there's space */
if (cnt == getpagesize() / sizeof(struct
bnxt_vlan_antispoof_table_entry)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VLAN anti-spoof table is full\n");
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VF %d cannot add VLAN %u\n",
i, vlan);
rc = -1;
@@ -585,7 +585,7 @@ int rte_pmd_bnxt_get_vf_stats(uint16_t port,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to get VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -612,7 +612,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to reset VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -638,7 +638,7 @@ int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to query VF %d RX stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -665,7 +665,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to query VF %d TX drops on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -697,7 +697,7 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *addr,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to config VF %d MAC on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -773,7 +773,7 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d vlan insert on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -807,7 +807,7 @@ int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set persist stats on non-PF port %d!\n",
port);
return -EINVAL;
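Nearly every hunk in rte_pmd_bnxt.c follows the same shape: validate the port id, then require a physical function before touching VF state. The recurring guard, condensed into one hedged sketch (the function name is illustrative and the body is not a full implementation):

#include <errno.h>
#include <rte_ethdev_driver.h>
#include "bnxt.h"	/* for struct bnxt, BNXT_PF(), PMD_DRV_LOG() */

static int pf_only_vf_op(uint16_t port, uint16_t vf)	/* illustrative */
{
	struct rte_eth_dev *dev;
	struct bnxt *bp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	bp = (struct bnxt *)dev->data->dev_private;

	if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR,
			"Attempt to use VF %d op on non-PF port %d!\n",
			vf, port);
		return -ENOTSUP;
	}
	/* ... PF-only work ... */
	return 0;
}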
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h
index f881d30d..cd7227ac 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.h
+++ b/drivers/net/bnxt/rte_pmd_bnxt.h
@@ -34,7 +34,7 @@
#ifndef _PMD_BNXT_H_
#define _PMD_BNXT_H_
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
/*
* Response sent back to the caller after callback
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index dea1bd5c..4a6633ed 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/bonding/meson.build b/drivers/net/bonding/meson.build
new file mode 100644
index 00000000..b90abc6d
--- /dev/null
+++ b/drivers/net/bonding/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+name = 'bond' #, james bond :-)
+sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c',
+ 'rte_eth_bond_args.c', 'rte_eth_bond_8023ad.c', 'rte_eth_bond_alb.c')
+
+deps += 'sched' # needed for rte_bitmap.h
+deps += ['ip_frag', 'cmdline']
+
+install_headers('rte_eth_bond.h', 'rte_eth_bond_8023ad.h')
diff --git a/drivers/net/bonding/rte_eth_bond.h b/drivers/net/bonding/rte_eth_bond.h
index 87ff2917..b668ff9a 100644
--- a/drivers/net/bonding/rte_eth_bond.h
+++ b/drivers/net/bonding/rte_eth_bond.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef _RTE_ETH_BOND_H_
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index eee9e502..c452318f 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <stddef.h>
@@ -1173,7 +1144,8 @@ bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
uint8_t i;
for (i = 0; i < internals->active_slave_count; i++)
- bond_mode_8023ad_activate_slave(bond_dev, i);
+ bond_mode_8023ad_activate_slave(bond_dev,
+ internals->active_slaves[i]);
return 0;
}
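The one functional change in this file fixes an index-for-id mix-up: bond_mode_8023ad_activate_slave() expects a port id, but the loop passed the loop counter i, so slaves were only handled correctly when their port ids happened to equal their positions in active_slaves[]. A generic standalone illustration of the hazard:

#include <stdint.h>
#include <stdio.h>

static void activate(uint16_t port_id)
{
	printf("activating port %u\n", port_id);
}

int main(void)
{
	/* The array holds port ids, which need not be 0..n-1. */
	uint16_t active_slaves[] = { 7, 3, 9 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		activate(active_slaves[i]);	/* correct: pass the id */
		/* the buggy form, activate(i), would act on ports 0..2 */
	return 0;
}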
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h b/drivers/net/bonding/rte_eth_bond_8023ad.h
index 2874336d..d8b5dbc2 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef RTE_ETH_BOND_8023AD_H_
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
index 433c7000..0f490a51 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad_private.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef RTE_ETH_BOND_8023AD_PRIVATE_H_
diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c
index f7efbb78..3f9945b3 100644
--- a/drivers/net/bonding/rte_eth_bond_alb.c
+++ b/drivers/net/bonding/rte_eth_bond_alb.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include "rte_eth_bond_private.h"
diff --git a/drivers/net/bonding/rte_eth_bond_alb.h b/drivers/net/bonding/rte_eth_bond_alb.h
index 9f17f7c8..4640fd24 100644
--- a/drivers/net/bonding/rte_eth_bond_alb.h
+++ b/drivers/net/bonding/rte_eth_bond_alb.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef RTE_ETH_BOND_ALB_H_
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 980e6368..f854b737 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -1,41 +1,12 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <string.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_tcp.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
@@ -63,6 +34,25 @@ valid_bonded_port_id(uint16_t port_id)
}
int
+check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev)
+{
+ int i;
+ struct bond_dev_private *internals;
+
+ if (check_for_bonded_ethdev(eth_dev) != 0)
+ return 0;
+
+ internals = eth_dev->data->dev_private;
+
+ /* Check if any of slave devices is a bonded device */
+ for (i = 0; i < internals->slave_count; i++)
+ if (valid_bonded_port_id(internals->slaves[i].port_id) == 0)
+ return 1;
+
+ return 0;
+}
+
+int
valid_slave_port_id(uint16_t port_id, uint8_t mode)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
@@ -252,9 +242,6 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
return -1;
}
- /* Add slave details to bonded device */
- slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
-
rte_eth_dev_info_get(slave_port_id, &dev_info);
if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) {
RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small",
@@ -272,8 +259,13 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
if (internals->slave_count < 1) {
/* if MAC is not user defined then use MAC of first slave add to
* bonded device */
- if (!internals->user_defined_mac)
- mac_address_set(bonded_eth_dev, slave_eth_dev->data->mac_addrs);
+ if (!internals->user_defined_mac) {
+ if (mac_address_set(bonded_eth_dev,
+ slave_eth_dev->data->mac_addrs)) {
+ RTE_BOND_LOG(ERR, "Failed to set MAC address");
+ return -1;
+ }
+ }
/* Inherit eth dev link properties from first slave */
link_properties_set(bonded_eth_dev,
@@ -326,18 +318,21 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
internals->slave_count++;
- /* Update all slave devices MACs*/
- mac_address_slaves_update(bonded_eth_dev);
-
if (bonded_eth_dev->data->dev_started) {
if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
- slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
+ internals->slave_count--;
RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
slave_port_id);
return -1;
}
}
+ /* Add slave details to bonded device */
+ slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
+
+ /* Update all slave devices MACs*/
+ mac_address_slaves_update(bonded_eth_dev);
+
/* Register link status change callback with bonded device pointer as
* argument*/
rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
@@ -434,7 +429,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
&rte_eth_devices[bonded_port_id].data->port_id);
/* Restore original MAC address of slave device */
- mac_address_set(&rte_eth_devices[slave_port_id],
+ rte_eth_dev_default_mac_addr_set(slave_port_id,
&(internals->slaves[slave_idx].persisted_mac_addr));
slave_eth_dev = &rte_eth_devices[slave_port_id];
@@ -496,10 +491,18 @@ rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id)
int
rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode)
{
+ struct rte_eth_dev *bonded_eth_dev;
+
if (valid_bonded_port_id(bonded_port_id) != 0)
return -1;
- return bond_ethdev_mode_set(&rte_eth_devices[bonded_port_id], mode);
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+
+ if (check_for_master_bonded_ethdev(bonded_eth_dev) != 0 &&
+ mode == BONDING_MODE_8023AD)
+ return -1;
+
+ return bond_ethdev_mode_set(bonded_eth_dev, mode);
}
int
@@ -667,15 +670,15 @@ rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy)
switch (policy) {
case BALANCE_XMIT_POLICY_LAYER2:
internals->balance_xmit_policy = policy;
- internals->xmit_hash = xmit_l2_hash;
+ internals->burst_xmit_hash = burst_xmit_l2_hash;
break;
case BALANCE_XMIT_POLICY_LAYER23:
internals->balance_xmit_policy = policy;
- internals->xmit_hash = xmit_l23_hash;
+ internals->burst_xmit_hash = burst_xmit_l23_hash;
break;
case BALANCE_XMIT_POLICY_LAYER34:
internals->balance_xmit_policy = policy;
- internals->xmit_hash = xmit_l34_hash;
+ internals->burst_xmit_hash = burst_xmit_l34_hash;
break;
default:
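rte_eth_bond_mode_set() now consults check_for_master_bonded_ethdev() and refuses BONDING_MODE_8023AD when any slave is itself a bonded device, since LACP cannot run correctly over a stacked bond. A hedged usage sketch (device names illustrative, error handling trimmed):

#include <rte_eth_bond.h>

static int try_stacked_lacp(void)
{
	int inner, outer;

	inner = rte_eth_bond_create("net_bond_inner",
			BONDING_MODE_ACTIVE_BACKUP, 0);
	outer = rte_eth_bond_create("net_bond_outer",
			BONDING_MODE_ACTIVE_BACKUP, 0);
	if (inner < 0 || outer < 0)
		return -1;

	rte_eth_bond_slave_add(outer, inner);	/* slave is itself a bond */

	/* Fails (-1): the new guard detects the bonded slave. */
	return rte_eth_bond_mode_set(outer, BONDING_MODE_8023AD);
}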
diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c
index e816da31..27d3101b 100644
--- a/drivers/net/bonding/rte_eth_bond_args.c
+++ b/drivers/net/bonding/rte_eth_bond_args.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <rte_devargs.h>
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index fe232895..c34c3251 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1,41 +1,12 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <stdlib.h>
#include <netinet/in.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
@@ -309,87 +280,114 @@ bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
static uint16_t
bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_pkts)
+ uint16_t nb_bufs)
{
- struct bond_dev_private *internals;
- struct bond_tx_queue *bd_tx_q;
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
- uint16_t num_of_slaves;
- uint16_t slaves[RTE_MAX_ETHPORTS];
- /* positions in slaves, not ID */
- uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
- uint8_t distributing_count;
+ uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
- uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
- uint16_t i, op_slave_idx;
+ uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t dist_slave_count;
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
+ /* 2-D array to sort mbufs for transmission on each slave into */
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
+ /* Number of mbufs for transmission on each slave */
+ uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
+ /* Mapping array generated by hash function to map mbufs to slaves */
+ uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
- /* Total amount of packets in slave_bufs */
- uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
- /* Slow packets placed in each slave */
+ uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- if (unlikely(nb_pkts == 0))
- return 0;
+ uint16_t i, j;
- bd_tx_q = (struct bond_tx_queue *)queue;
- internals = bd_tx_q->dev_private;
+ if (unlikely(nb_bufs == 0))
+ return 0;
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
- num_of_slaves = internals->active_slave_count;
- if (num_of_slaves < 1)
- return num_tx_total;
+ slave_count = internals->active_slave_count;
+ if (unlikely(slave_count < 1))
+ return 0;
- memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) *
- num_of_slaves);
+ memcpy(slave_port_ids, internals->active_slaves,
+ sizeof(slave_port_ids[0]) * slave_count);
+
+
+ dist_slave_count = 0;
+ for (i = 0; i < slave_count; i++) {
+ struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
- distributing_count = 0;
- for (i = 0; i < num_of_slaves; i++) {
- struct port *port = &mode_8023ad_ports[slaves[i]];
if (ACTOR_STATE(port, DISTRIBUTING))
- distributing_offsets[distributing_count++] = i;
+ dist_slave_port_ids[dist_slave_count++] =
+ slave_port_ids[i];
}
- if (likely(distributing_count > 0)) {
- /* Populate slaves mbuf with the packets which are to be sent */
- for (i = 0; i < nb_pkts; i++) {
- /* Select output slave using hash based on xmit policy */
- op_slave_idx = internals->xmit_hash(bufs[i],
- distributing_count);
+ if (unlikely(dist_slave_count < 1))
+ return 0;
- /* Populate slave mbuf arrays with mbufs for that slave.
- * Use only slaves that are currently distributing.
- */
- uint8_t slave_offset =
- distributing_offsets[op_slave_idx];
- slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] =
- bufs[i];
- slave_nb_pkts[slave_offset]++;
- }
+ /*
+	 * Populate slave mbuf arrays with the packets to be sent on each
+	 * slave, selecting the output slave with a hash based on the xmit
+	 * policy
+ */
+ internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
+ bufs_slave_port_idxs);
+
+ for (i = 0; i < nb_bufs; i++) {
+ /* Populate slave mbuf arrays with mbufs for that slave. */
+ uint8_t slave_idx = bufs_slave_port_idxs[i];
+
+ slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
}
+
/* Send packet burst on each slave device */
- for (i = 0; i < num_of_slaves; i++) {
- if (slave_nb_pkts[i] == 0)
+ for (i = 0; i < dist_slave_count; i++) {
+ if (slave_nb_bufs[i] == 0)
continue;
- num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
- slave_bufs[i], slave_nb_pkts[i]);
+ slave_tx_count = rte_eth_tx_burst(dist_slave_port_ids[i],
+ bd_tx_q->queue_id, slave_bufs[i],
+ slave_nb_bufs[i]);
- num_tx_total += num_tx_slave;
- num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;
+ total_tx_count += slave_tx_count;
/* If tx burst fails move packets to end of bufs */
- if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
- uint16_t j = nb_pkts - num_tx_fail_total;
- for ( ; num_tx_slave < slave_nb_pkts[i]; j++,
- num_tx_slave++)
- bufs[j] = slave_bufs[i][num_tx_slave];
+ if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
+ slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ slave_tx_count;
+ total_tx_fail_count += slave_tx_fail_count[i];
+
+ /*
+ * Shift bufs to beginning of array to allow reordering
+ * later
+ */
+ for (j = 0; j < slave_tx_fail_count[i]; j++) {
+ slave_bufs[i][j] =
+ slave_bufs[i][(slave_tx_count - 1) + j];
+ }
}
}
- return num_tx_total;
+ /*
+	 * If there are tx burst failures we move packets to the end of bufs
+	 * to preserve the expected PMD behaviour of all packets that failed
+	 * transmission being at the end of the input mbuf array
+ */
+ if (unlikely(total_tx_fail_count > 0)) {
+ int bufs_idx = nb_bufs - total_tx_fail_count - 1;
+
+ for (i = 0; i < slave_count; i++) {
+ if (slave_tx_fail_count[i] > 0) {
+ for (j = 0; j < slave_tx_fail_count[i]; j++)
+ bufs[bufs_idx++] = slave_bufs[i][j];
+ }
+ }
+ }
+
+ return total_tx_count;
}
@@ -788,96 +786,129 @@ ipv6_hash(struct ipv6_hdr *ipv6_hdr)
(word_src_addr[3] ^ word_dst_addr[3]);
}
-uint16_t
-xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count)
+
+void
+burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves)
{
- struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
+ struct ether_hdr *eth_hdr;
+ uint32_t hash;
+ int i;
- uint32_t hash = ether_hash(eth_hdr);
+ for (i = 0; i < nb_pkts; i++) {
+ eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *);
+
+ hash = ether_hash(eth_hdr);
- return (hash ^= hash >> 8) % slave_count;
+ slaves[i] = (hash ^= hash >> 8) % slave_count;
+ }
}
-uint16_t
-xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count)
+void
+burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves)
{
- struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
- uint16_t proto = eth_hdr->ether_type;
- size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
- uint32_t hash, l3hash = 0;
+ uint16_t i;
+ struct ether_hdr *eth_hdr;
+ uint16_t proto;
+ size_t vlan_offset;
+ uint32_t hash, l3hash;
- hash = ether_hash(eth_hdr);
+ for (i = 0; i < nb_pkts; i++) {
+ eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *);
+ l3hash = 0;
- if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
- struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
- ((char *)(eth_hdr + 1) + vlan_offset);
- l3hash = ipv4_hash(ipv4_hdr);
+ proto = eth_hdr->ether_type;
+ hash = ether_hash(eth_hdr);
- } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
- struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
- ((char *)(eth_hdr + 1) + vlan_offset);
- l3hash = ipv6_hash(ipv6_hdr);
- }
+ vlan_offset = get_vlan_offset(eth_hdr, &proto);
- hash = hash ^ l3hash;
- hash ^= hash >> 16;
- hash ^= hash >> 8;
+ if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
+ struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ l3hash = ipv4_hash(ipv4_hdr);
- return hash % slave_count;
-}
+ } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
+ struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ l3hash = ipv6_hash(ipv6_hdr);
+ }
-uint16_t
-xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
-{
- struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);
- uint16_t proto = eth_hdr->ether_type;
- size_t vlan_offset = get_vlan_offset(eth_hdr, &proto);
+ hash = hash ^ l3hash;
+ hash ^= hash >> 16;
+ hash ^= hash >> 8;
- struct udp_hdr *udp_hdr = NULL;
- struct tcp_hdr *tcp_hdr = NULL;
- uint32_t hash, l3hash = 0, l4hash = 0;
+ slaves[i] = hash % slave_count;
+ }
+}
- if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
- struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
- ((char *)(eth_hdr + 1) + vlan_offset);
- size_t ip_hdr_offset;
+void
+burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves)
+{
+ struct ether_hdr *eth_hdr;
+ uint16_t proto;
+ size_t vlan_offset;
+ int i;
- l3hash = ipv4_hash(ipv4_hdr);
+ struct udp_hdr *udp_hdr;
+ struct tcp_hdr *tcp_hdr;
+ uint32_t hash, l3hash, l4hash;
- /* there is no L4 header in fragmented packet */
- if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
- ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
+ for (i = 0; i < nb_pkts; i++) {
+ eth_hdr = rte_pktmbuf_mtod(buf[i], struct ether_hdr *);
+ proto = eth_hdr->ether_type;
+ vlan_offset = get_vlan_offset(eth_hdr, &proto);
+ l3hash = 0;
+ l4hash = 0;
+
+ if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
+ struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ size_t ip_hdr_offset;
+
+ l3hash = ipv4_hash(ipv4_hdr);
+
+ /* there is no L4 header in fragmented packet */
+ if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr)
+ == 0)) {
+ ip_hdr_offset = (ipv4_hdr->version_ihl
+ & IPV4_HDR_IHL_MASK) *
IPV4_IHL_MULTIPLIER;
- if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
- tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
- ip_hdr_offset);
+ if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
+ tcp_hdr = (struct tcp_hdr *)
+ ((char *)ipv4_hdr +
+ ip_hdr_offset);
+ l4hash = HASH_L4_PORTS(tcp_hdr);
+ } else if (ipv4_hdr->next_proto_id ==
+ IPPROTO_UDP) {
+ udp_hdr = (struct udp_hdr *)
+ ((char *)ipv4_hdr +
+ ip_hdr_offset);
+ l4hash = HASH_L4_PORTS(udp_hdr);
+ }
+ }
+ } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
+ struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+ ((char *)(eth_hdr + 1) + vlan_offset);
+ l3hash = ipv6_hash(ipv6_hdr);
+
+ if (ipv6_hdr->proto == IPPROTO_TCP) {
+ tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
l4hash = HASH_L4_PORTS(tcp_hdr);
- } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
- ip_hdr_offset);
+ } else if (ipv6_hdr->proto == IPPROTO_UDP) {
+ udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
l4hash = HASH_L4_PORTS(udp_hdr);
}
}
- } else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
- struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
- ((char *)(eth_hdr + 1) + vlan_offset);
- l3hash = ipv6_hash(ipv6_hdr);
- if (ipv6_hdr->proto == IPPROTO_TCP) {
- tcp_hdr = (struct tcp_hdr *)(ipv6_hdr + 1);
- l4hash = HASH_L4_PORTS(tcp_hdr);
- } else if (ipv6_hdr->proto == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
- l4hash = HASH_L4_PORTS(udp_hdr);
- }
- }
+ hash = l3hash ^ l4hash;
+ hash ^= hash >> 16;
+ hash ^= hash >> 8;
- hash = l3hash ^ l4hash;
- hash ^= hash >> 16;
- hash ^= hash >> 8;
-
- return hash % slave_count;
+ slaves[i] = hash % slave_count;
+ }
}
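All of the burst hash variants above finish with the same byte-fold before reducing modulo the slave count (the l23/l34 variants fold twice, l2 once). A minimal standalone sketch of that reduction, with illustrative names only:

	static inline uint16_t
	fold_hash_to_slave(uint32_t hash, uint8_t slave_count)
	{
		/* XOR the upper bytes down, then reduce to a slave index */
		hash ^= hash >> 16;
		hash ^= hash >> 8;
		return hash % slave_count;
	}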
struct bwg_slave {
@@ -1185,156 +1216,239 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
static uint16_t
bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_pkts)
+ uint16_t nb_bufs)
{
- struct bond_dev_private *internals;
- struct bond_tx_queue *bd_tx_q;
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
- uint16_t num_of_slaves;
- uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
- uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;
+ /* Array to sort mbufs for transmission on each slave into */
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
+ /* Number of mbufs for transmission on each slave */
+ uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
+ /* Mapping array generated by hash function to map mbufs to slaves */
+ uint16_t bufs_slave_port_idxs[nb_bufs];
- int i, op_slave_id;
+ uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
- uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t i, j;
- bd_tx_q = (struct bond_tx_queue *)queue;
- internals = bd_tx_q->dev_private;
+ if (unlikely(nb_bufs == 0))
+ return 0;
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
- num_of_slaves = internals->active_slave_count;
- memcpy(slaves, internals->active_slaves,
- sizeof(internals->active_slaves[0]) * num_of_slaves);
+ slave_count = internals->active_slave_count;
+ if (unlikely(slave_count < 1))
+ return 0;
- if (num_of_slaves < 1)
- return num_tx_total;
+ memcpy(slave_port_ids, internals->active_slaves,
+ sizeof(slave_port_ids[0]) * slave_count);
- /* Populate slaves mbuf with the packets which are to be sent on it */
- for (i = 0; i < nb_pkts; i++) {
- /* Select output slave using hash based on xmit policy */
- op_slave_id = internals->xmit_hash(bufs[i], num_of_slaves);
+ /*
+	 * Populate each slave's mbuf array with the packets to be sent on
+	 * it, selecting the output slave with a hash based on the xmit policy
+ */
+ internals->burst_xmit_hash(bufs, nb_bufs, slave_count,
+ bufs_slave_port_idxs);
+
+ for (i = 0; i < nb_bufs; i++) {
+ /* Populate slave mbuf arrays with mbufs for that slave. */
+		uint16_t slave_idx = bufs_slave_port_idxs[i];
- /* Populate slave mbuf arrays with mbufs for that slave */
- slave_bufs[op_slave_id][slave_nb_pkts[op_slave_id]++] = bufs[i];
+ slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
}
/* Send packet burst on each slave device */
- for (i = 0; i < num_of_slaves; i++) {
- if (slave_nb_pkts[i] > 0) {
- num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
- slave_bufs[i], slave_nb_pkts[i]);
+ for (i = 0; i < slave_count; i++) {
+ if (slave_nb_bufs[i] == 0)
+ continue;
- /* if tx burst fails move packets to end of bufs */
- if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
- int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;
+ slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ bd_tx_q->queue_id, slave_bufs[i],
+ slave_nb_bufs[i]);
- tx_fail_total += slave_tx_fail_count;
- memcpy(&bufs[nb_pkts - tx_fail_total],
- &slave_bufs[i][num_tx_slave],
- slave_tx_fail_count * sizeof(bufs[0]));
+ total_tx_count += slave_tx_count;
+
+ /* If tx burst fails move packets to end of bufs */
+ if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
+ slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ slave_tx_count;
+ total_tx_fail_count += slave_tx_fail_count[i];
+
+ /*
+ * Shift bufs to beginning of array to allow reordering
+ * later
+ */
+ for (j = 0; j < slave_tx_fail_count[i]; j++) {
+ slave_bufs[i][j] =
+					slave_bufs[i][slave_tx_count + j];
}
+ }
+ }
- num_tx_total += num_tx_slave;
+ /*
+	 * If there were tx burst failures, move those packets to the end of
+	 * bufs to preserve the expected PMD behaviour that all mbufs which
+	 * failed transmission are at the end of the input array
+ */
+ if (unlikely(total_tx_fail_count > 0)) {
+		int bufs_idx = nb_bufs - total_tx_fail_count;
+
+ for (i = 0; i < slave_count; i++) {
+ if (slave_tx_fail_count[i] > 0) {
+ for (j = 0; j < slave_tx_fail_count[i]; j++)
+ bufs[bufs_idx++] = slave_bufs[i][j];
+ }
}
}
- return num_tx_total;
+ return total_tx_count;
}
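Since failed mbufs are compacted to the tail of bufs, a caller can retry exactly the unsent remainder. A usage sketch, assuming bond_port_id is an already configured bonded port and queue 0:

	uint16_t sent = rte_eth_tx_burst(bond_port_id, 0, bufs, nb_bufs);

	/* bufs[sent..nb_bufs-1] now holds only the packets that failed */
	while (sent < nb_bufs)
		sent += rte_eth_tx_burst(bond_port_id, 0,
				&bufs[sent], nb_bufs - sent);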
static uint16_t
bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
- uint16_t nb_pkts)
+ uint16_t nb_bufs)
{
- struct bond_dev_private *internals;
- struct bond_tx_queue *bd_tx_q;
+ struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+ struct bond_dev_private *internals = bd_tx_q->dev_private;
- uint16_t num_of_slaves;
- uint16_t slaves[RTE_MAX_ETHPORTS];
- /* positions in slaves, not ID */
- uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
- uint8_t distributing_count;
+ uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
- uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0;
- uint16_t i, j, op_slave_idx;
- const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1;
+ uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
+ uint16_t dist_slave_count;
- /* Allocate additional packets in case 8023AD mode. */
- struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size];
- void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL };
+ /* 2-D array to sort mbufs for transmission on each slave into */
+ struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
+ /* Number of mbufs for transmission on each slave */
+ uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
+ /* Mapping array generated by hash function to map mbufs to slaves */
+	uint16_t bufs_slave_port_idxs[nb_bufs];
- /* Total amount of packets in slave_bufs */
- uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
- /* Slow packets placed in each slave */
- uint8_t slave_slow_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t slave_tx_count, slave_tx_fail_count[RTE_MAX_ETHPORTS] = { 0 };
+ uint16_t total_tx_count = 0, total_tx_fail_count = 0;
- bd_tx_q = (struct bond_tx_queue *)queue;
- internals = bd_tx_q->dev_private;
+ uint16_t i, j;
+
+ if (unlikely(nb_bufs == 0))
+ return 0;
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
- num_of_slaves = internals->active_slave_count;
- if (num_of_slaves < 1)
- return num_tx_total;
-
- memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);
-
- distributing_count = 0;
- for (i = 0; i < num_of_slaves; i++) {
- struct port *port = &mode_8023ad_ports[slaves[i]];
+ slave_count = internals->active_slave_count;
+ if (unlikely(slave_count < 1))
+ return 0;
- slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
- slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS,
- NULL);
- slave_nb_pkts[i] = slave_slow_nb_pkts[i];
+ memcpy(slave_port_ids, internals->active_slaves,
+ sizeof(slave_port_ids[0]) * slave_count);
- for (j = 0; j < slave_slow_nb_pkts[i]; j++)
- slave_bufs[i][j] = slow_pkts[j];
+ dist_slave_count = 0;
+ for (i = 0; i < slave_count; i++) {
+ struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
if (ACTOR_STATE(port, DISTRIBUTING))
- distributing_offsets[distributing_count++] = i;
+ dist_slave_port_ids[dist_slave_count++] =
+ slave_port_ids[i];
}
- if (likely(distributing_count > 0)) {
- /* Populate slaves mbuf with the packets which are to be sent on it */
- for (i = 0; i < nb_pkts; i++) {
- /* Select output slave using hash based on xmit policy */
- op_slave_idx = internals->xmit_hash(bufs[i], distributing_count);
+	if (likely(dist_slave_count > 0)) {
- /* Populate slave mbuf arrays with mbufs for that slave. Use only
- * slaves that are currently distributing. */
- uint8_t slave_offset = distributing_offsets[op_slave_idx];
- slave_bufs[slave_offset][slave_nb_pkts[slave_offset]] = bufs[i];
- slave_nb_pkts[slave_offset]++;
+ /*
+		 * Populate each slave's mbuf array with the packets to be
+		 * sent on it, selecting the output slave with a hash based
+		 * on the xmit policy
+ */
+ internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
+ bufs_slave_port_idxs);
+
+ for (i = 0; i < nb_bufs; i++) {
+ /*
+ * Populate slave mbuf arrays with mbufs for that
+ * slave
+ */
+			uint16_t slave_idx = bufs_slave_port_idxs[i];
+
+ slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] =
+ bufs[i];
}
- }
- /* Send packet burst on each slave device */
- for (i = 0; i < num_of_slaves; i++) {
- if (slave_nb_pkts[i] == 0)
- continue;
- num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
- slave_bufs[i], slave_nb_pkts[i]);
+ /* Send packet burst on each slave device */
+ for (i = 0; i < dist_slave_count; i++) {
+ if (slave_nb_bufs[i] == 0)
+ continue;
- /* If tx burst fails drop slow packets */
- for ( ; num_tx_slave < slave_slow_nb_pkts[i]; num_tx_slave++)
- rte_pktmbuf_free(slave_bufs[i][num_tx_slave]);
+ slave_tx_count = rte_eth_tx_burst(
+ dist_slave_port_ids[i],
+ bd_tx_q->queue_id, slave_bufs[i],
+ slave_nb_bufs[i]);
+
+ total_tx_count += slave_tx_count;
+
+ /* If tx burst fails move packets to end of bufs */
+ if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
+ slave_tx_fail_count[i] = slave_nb_bufs[i] -
+ slave_tx_count;
+ total_tx_fail_count += slave_tx_fail_count[i];
+
+ /*
+ * Shift bufs to beginning of array to allow
+ * reordering later
+ */
+				for (j = 0; j < slave_tx_fail_count[i]; j++)
+					slave_bufs[i][j] = slave_bufs[i][
+						slave_tx_count + j];
+ }
+ }
- num_tx_total += num_tx_slave - slave_slow_nb_pkts[i];
- num_tx_fail_total += slave_nb_pkts[i] - num_tx_slave;
+ /*
+		 * If there were tx burst failures, move those packets to the
+		 * end of bufs to preserve the expected PMD behaviour that all
+		 * mbufs which failed transmission are at the end of the array
+ */
+ if (unlikely(total_tx_fail_count > 0)) {
+			int bufs_idx = nb_bufs - total_tx_fail_count;
+
+ for (i = 0; i < slave_count; i++) {
+ if (slave_tx_fail_count[i] > 0) {
+				for (j = 0; j < slave_tx_fail_count[i]; j++)
+					bufs[bufs_idx++] =
+						slave_bufs[i][j];
+ }
+ }
+ }
+ }
- /* If tx burst fails move packets to end of bufs */
- if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
- uint16_t j = nb_pkts - num_tx_fail_total;
- for ( ; num_tx_slave < slave_nb_pkts[i]; j++, num_tx_slave++)
- bufs[j] = slave_bufs[i][num_tx_slave];
+ /* Check for LACP control packets and send if available */
+ for (i = 0; i < slave_count; i++) {
+ struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+ struct rte_mbuf *ctrl_pkt = NULL;
+
+ if (likely(rte_ring_empty(port->tx_ring)))
+ continue;
+
+ if (rte_ring_dequeue(port->tx_ring,
+ (void **)&ctrl_pkt) != -ENOENT) {
+ slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ bd_tx_q->queue_id, &ctrl_pkt, 1);
+ /*
+ * re-enqueue LAG control plane packets to buffering
+ * ring if transmission fails so the packet isn't lost.
+ */
+ if (slave_tx_count != 1)
+ rte_ring_enqueue(port->tx_ring, ctrl_pkt);
}
}
- return num_tx_total;
+ return total_tx_count;
}
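The control-packet tail of the function boils down to a dequeue/transmit/re-enqueue pattern; a condensed sketch, where ring, port_id and queue_id are placeholders for the per-slave values above:

	struct rte_mbuf *ctrl_pkt;

	if (rte_ring_dequeue(ring, (void **)&ctrl_pkt) == 0 &&
			rte_eth_tx_burst(port_id, queue_id, &ctrl_pkt, 1) != 1)
		rte_ring_enqueue(ring, ctrl_pkt);	/* keep the LACPDU */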
static uint16_t
@@ -1500,7 +1614,8 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
case BONDING_MODE_BALANCE:
case BONDING_MODE_BROADCAST:
for (i = 0; i < internals->slave_count; i++) {
- if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
+ if (rte_eth_dev_default_mac_addr_set(
+ internals->slaves[i].port_id,
bonded_eth_dev->data->mac_addrs)) {
RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
internals->slaves[i].port_id);
@@ -1518,15 +1633,16 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
for (i = 0; i < internals->slave_count; i++) {
if (internals->slaves[i].port_id ==
internals->current_primary_port) {
- if (mac_address_set(&rte_eth_devices[internals->primary_port],
+ if (rte_eth_dev_default_mac_addr_set(
+ internals->primary_port,
bonded_eth_dev->data->mac_addrs)) {
RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
internals->current_primary_port);
return -1;
}
} else {
- if (mac_address_set(
- &rte_eth_devices[internals->slaves[i].port_id],
+ if (rte_eth_dev_default_mac_addr_set(
+ internals->slaves[i].port_id,
&internals->slaves[i].persisted_mac_addr)) {
RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
internals->slaves[i].port_id);
@@ -2476,7 +2592,7 @@ bond_ethdev_delayed_lsc_propagation(void *arg)
return;
_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
- RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
+ RTE_ETH_EVENT_INTR_LSC, NULL);
}
int
@@ -2584,7 +2700,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
else
_rte_eth_dev_callback_process(bonded_eth_dev,
RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
} else {
if (internals->link_down_delay_ms > 0)
@@ -2594,7 +2710,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
else
_rte_eth_dev_callback_process(bonded_eth_dev,
RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
}
return 0;
@@ -2707,6 +2823,41 @@ bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+static int
+bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct rte_eth_dev *slave_eth_dev;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int ret, i;
+
+ rte_spinlock_lock(&internals->lock);
+
+ for (i = 0; i < internals->slave_count; i++) {
+ slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
+ if (*slave_eth_dev->dev_ops->mtu_set == NULL) {
+ rte_spinlock_unlock(&internals->lock);
+ return -ENOTSUP;
+ }
+ }
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_eth_dev_set_mtu(internals->slaves[i].port_id, mtu);
+ if (ret < 0) {
+ rte_spinlock_unlock(&internals->lock);
+ return ret;
+ }
+ }
+
+ rte_spinlock_unlock(&internals->lock);
+ return 0;
+}
+
+static void
+bond_ethdev_mac_address_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ if (mac_address_set(dev, addr))
+ RTE_BOND_LOG(ERR, "Failed to update MAC address");
+}
+
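With .mtu_set and .mac_addr_set wired into the ops table below, both settings become reachable through the generic ethdev API. A usage sketch, where bond_port_id is an assumed, already configured bonded port:

	/* Propagates to every slave; fails with -ENOTSUP if any slave
	 * lacks an mtu_set op.
	 */
	if (rte_eth_dev_set_mtu(bond_port_id, 9000) != 0)
		RTE_LOG(ERR, PMD, "bonded MTU update failed\n");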
const struct eth_dev_ops default_dev_ops = {
.dev_start = bond_ethdev_start,
.dev_stop = bond_ethdev_stop,
@@ -2726,7 +2877,9 @@ const struct eth_dev_ops default_dev_ops = {
.reta_update = bond_ethdev_rss_reta_update,
.reta_query = bond_ethdev_rss_reta_query,
.rss_hash_update = bond_ethdev_rss_hash_update,
- .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get
+ .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get,
+ .mtu_set = bond_ethdev_mtu_set,
+ .mac_addr_set = bond_ethdev_mac_address_set
};
static int
@@ -2769,7 +2922,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
internals->mode = BONDING_MODE_INVALID;
internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
- internals->xmit_hash = xmit_l2_hash;
+ internals->burst_xmit_hash = burst_xmit_l2_hash;
internals->user_defined_mac = 0;
internals->link_status_polling_enabled = 0;
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index 1392da98..92e15f8c 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -1,40 +1,11 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#ifndef _RTE_ETH_BOND_PRIVATE_H_
#define _RTE_ETH_BOND_PRIVATE_H_
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_spinlock.h>
#include <rte_bitmap.h>
@@ -109,8 +80,8 @@ struct bond_slave_details {
uint16_t reta_size;
};
-
-typedef uint16_t (*xmit_hash_t)(const struct rte_mbuf *buf, uint8_t slave_count);
+typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves);
/** Link Bonding PMD device private configuration Structure */
struct bond_dev_private {
@@ -126,7 +97,7 @@ struct bond_dev_private {
uint8_t balance_xmit_policy;
/**< Transmit policy - l2 / l23 / l34 for operation in balance mode */
- xmit_hash_t xmit_hash;
+ burst_xmit_hash_t burst_xmit_hash;
/**< Transmit policy hash function */
uint8_t user_defined_mac;
@@ -183,6 +154,9 @@ struct bond_dev_private {
extern const struct eth_dev_ops default_dev_ops;
int
+check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev);
+
+int
check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev);
/* Search given slave array to find position of given id.
@@ -245,14 +219,18 @@ void
slave_add(struct bond_dev_private *internals,
struct rte_eth_dev *slave_eth_dev);
-uint16_t
-xmit_l2_hash(const struct rte_mbuf *buf, uint8_t slave_count);
+void
+burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves);
-uint16_t
-xmit_l23_hash(const struct rte_mbuf *buf, uint8_t slave_count);
+void
+burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves);
+
+void
+burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint8_t slave_count, uint16_t *slaves);
-uint16_t
-xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count);
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 282e2e62..56f38c83 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -44,7 +44,7 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index dc153c73..5cd260f4 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -56,7 +56,7 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 5b828c23..28db6c06 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -55,7 +55,7 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index fc10d958..3d5aa596 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -56,7 +56,7 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
index 171686ec..9c2a5ea8 100644
--- a/drivers/net/dpaa/Makefile
+++ b/drivers/net/dpaa/Makefile
@@ -1,32 +1,6 @@
-# BSD LICENSE
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
#
-# Copyright 2017 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of NXP nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/rte.vars.mk
RTE_SDK_DPAA=$(RTE_SDK)/drivers/net/dpaa
@@ -43,7 +17,9 @@ CFLAGS += -I$(RTE_SDK_DPAA)/
CFLAGS += -I$(RTE_SDK_DPAA)/include
CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/base/qbman
CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include
@@ -60,4 +36,7 @@ LDLIBS += -lrte_mempool_dpaa
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA_PMD)-include := rte_pmd_dpaa.h
+
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index cf5a2ecf..9b69ef45 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* System headers */
#include <stdio.h>
@@ -54,7 +28,7 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
@@ -64,6 +38,7 @@
#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
+#include <rte_pmd_dpaa.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
@@ -72,6 +47,17 @@
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
+/* At present we allow up to 4 push mode queues only, as each of these
+ * queues needs a dedicated portal and portals are in short supply.
+ */
+#define DPAA_MAX_PUSH_MODE_QUEUE 4
+
+static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+static int dpaa_push_queue_idx; /* Index of the next queue to use push mode */
+
+
+/* Per FQ Taildrop in frame count */
+static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
struct rte_dpaa_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -107,23 +93,42 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
offsetof(struct dpaa_if_stats, tund)},
};
+static struct rte_dpaa_driver rte_dpaa_pmd;
+
+static inline void
+dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
+{
+ memset(opts, 0, sizeof(struct qm_mcc_initfq));
+ opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
+ QM_FQCTRL_PREFERINCACHE;
+ opts->fqd.context_a.stashing.exclusive = 0;
+ if (dpaa_svr_family != SVR_LS1046A_FAMILY)
+ opts->fqd.context_a.stashing.annotation_cl =
+ DPAA_IF_RX_ANNOTATION_STASH;
+ opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+ opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
+}
+
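dpaa_poll_queue_default_config() gives each caller a clean baseline to modify; a sketch of the intended call pattern (the delta applied here is illustrative):

	struct qm_mcc_initfq opts;

	dpaa_poll_queue_default_config(&opts);	/* zero and set defaults */
	opts.we_mask |= QM_INITFQ_WE_DESTWQ;	/* then apply caller deltas */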
static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + VLAN_TAG_SIZE;
PMD_INIT_FUNC_TRACE();
- if (mtu < ETHER_MIN_MTU)
+ if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
return -EINVAL;
- if (mtu > ETHER_MAX_LEN)
+ if (frame_size > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
else
dev->data->dev_conf.rxmode.jumbo_frame = 0;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = mtu;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
- fman_if_set_maxfrm(dpaa_intf->fif, mtu);
+ fman_if_set_maxfrm(dpaa_intf->fif, frame_size);
return 0;
}
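A worked example of the check above, using the standard sizes ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4 and VLAN_TAG_SIZE = 4:

	/* mtu = 1500:   frame_size = 1500 + 14 + 4 + 4 = 1522
	 *               1522 > ETHER_MAX_LEN (1518) -> jumbo_frame = 1
	 * mtu = 10218:  frame_size = 10240 = DPAA_MAX_RX_PKT_LEN,
	 *               the largest MTU still accepted
	 */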
@@ -131,15 +136,19 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
PMD_INIT_FUNC_TRACE();
if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- DPAA_MAX_RX_PKT_LEN)
- return dpaa_mtu_set(dev,
+ DPAA_MAX_RX_PKT_LEN) {
+ fman_if_set_maxfrm(dpaa_intf->fif,
dev->data->dev_conf.rxmode.max_rx_pkt_len);
- else
+ return 0;
+ } else {
return -1;
+ }
}
return 0;
}
@@ -212,19 +221,17 @@ dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
DPAA_PMD_ERR("Unable to open SoC device");
return -ENOTSUP; /* Not supported on this infra */
}
-
- ret = fscanf(svr_file, "svr:%x", &svr_ver);
- if (ret <= 0) {
+ if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
+ dpaa_svr_family = svr_ver & SVR_MASK;
+ else
DPAA_PMD_ERR("Unable to read SoC device");
- return -ENOTSUP; /* Not supported on this infra */
- }
- ret = snprintf(fw_version, fw_size,
- "svr:%x-fman-v%x",
- svr_ver,
- fman_ip_rev);
+ fclose(svr_file);
+ ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
+ svr_ver, fman_ip_rev);
ret += 1; /* add the size of '\0' */
+
if (fw_size < (uint32_t)ret)
return ret;
else
@@ -443,12 +450,16 @@ static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
- uint16_t nb_desc __rte_unused,
+ uint16_t nb_desc,
unsigned int socket_id __rte_unused,
const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mp)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
+ struct qm_mcc_initfq opts = {0};
+ u32 flags = 0;
+ int ret;
PMD_INIT_FUNC_TRACE();
@@ -484,7 +495,153 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
dpaa_intf->name, fd_offset,
fman_if_get_fdoff(dpaa_intf->fif));
}
- dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];
+	/* Check whether this queue can use push mode; no error check for now */
+ if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+ dpaa_push_queue_idx++;
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
+ QM_FQCTRL_CTXASTASHING |
+ QM_FQCTRL_PREFERINCACHE;
+ opts.fqd.context_a.stashing.exclusive = 0;
+		/* In a multicore scenario stashing becomes a bottleneck on
+		 * LS1046, so do not enable stashing in this case
+ */
+ if (dpaa_svr_family != SVR_LS1046A_FAMILY)
+ opts.fqd.context_a.stashing.annotation_cl =
+ DPAA_IF_RX_ANNOTATION_STASH;
+ opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+ opts.fqd.context_a.stashing.context_cl =
+ DPAA_IF_RX_CONTEXT_STASH;
+
+		/* Create a channel and associate the given queue with it */
+ qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
+ opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
+ opts.fqd.dest.channel = rxq->ch_id;
+ opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
+ flags = QMAN_INITFQ_FLAG_SCHED;
+
+ /* Configure tail drop */
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret)
+ DPAA_PMD_ERR("Channel/Queue association failed. fqid %d"
+ " ret: %d", rxq->fqid, ret);
+ rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
+ rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+ rxq->is_static = true;
+ }
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ /* configure the CGR size as per the desc size */
+ if (dpaa_intf->cgr_rx) {
+ struct qm_mcc_initcgr cgr_opts = {0};
+
+ /* Enable tail drop with cgr on this queue */
+ qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
+ ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
+ if (ret) {
+ DPAA_PMD_WARN(
+ "rx taildrop modify fail on fqid %d (ret=%d)",
+ rxq->fqid, ret);
+ }
+ }
+
+ return 0;
+}
+
+int __rte_experimental
+dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ u16 ch_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ int ret;
+ u32 flags = 0;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
+ struct qm_mcc_initfq opts = {0};
+
+ if (dpaa_push_mode_max_queue)
+ DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n"
+ "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
+ dpaa_push_mode_max_queue);
+
+ dpaa_poll_queue_default_config(&opts);
+
+ switch (queue_conf->ev.sched_type) {
+ case RTE_SCHED_TYPE_ATOMIC:
+ opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+		/* Reset the FQCTRL_AVOIDBLOCK bit, as it is an unnecessary
+		 * configuration with the HOLD_ACTIVE setting
+ */
+ opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
+ rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
+ break;
+ case RTE_SCHED_TYPE_ORDERED:
+ DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
+ return -1;
+ default:
+ opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
+ rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
+ break;
+ }
+
+ opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
+ opts.fqd.dest.channel = ch_id;
+ opts.fqd.dest.wq = queue_conf->ev.priority;
+
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+
+ flags = QMAN_INITFQ_FLAG_SCHED;
+
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret) {
+ DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d",
+ rxq->fqid, ret);
+ return ret;
+ }
+
+ /* copy configuration which needs to be filled during dequeue */
+ memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
+ dev->data->rx_queues[eth_rx_queue_id] = rxq;
+
+ return ret;
+}
+
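A sketch of the adapter queue configuration consumed above (field values are illustrative; eth_dev and ch_id are assumed to come from the event device setup). Atomic scheduling selects the HOLDACTIVE path, while anything other than ordered falls back to the parallel AVOIDBLOCK path:

	struct rte_event_eth_rx_adapter_queue_conf qconf = {
		.ev = {
			.sched_type = RTE_SCHED_TYPE_ATOMIC,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		},
	};

	dpaa_eth_eventq_attach(eth_dev, 0, ch_id, &qconf);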
+int __rte_experimental
+dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+ u32 flags = 0;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
+
+ dpaa_poll_queue_default_config(&opts);
+
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret) {
+ DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
+ rxq->fqid, ret);
+ }
+
+ rxq->cb.dqrr_dpdk_cb = NULL;
+ dev->data->rx_queues[eth_rx_queue_id] = NULL;
return 0;
}
@@ -515,6 +672,22 @@ static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
PMD_INIT_FUNC_TRACE();
}
+static uint32_t
+dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
+ u32 frm_cnt = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
+ RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
+ rx_queue_id, frm_cnt);
+ }
+ return frm_cnt;
+}
+
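With .rx_queue_count added to the ops table further down, an application can sample the RX backlog through the generic API; a usage sketch, assuming port_id is a valid DPAA port:

	int backlog = rte_eth_rx_queue_count(port_id, 0);

	if (backlog > 0)
		printf("queue 0 holds %d undequeued frames\n", backlog);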
static int dpaa_link_down(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
@@ -666,6 +839,7 @@ static struct eth_dev_ops dpaa_devops = {
.tx_queue_setup = dpaa_eth_tx_queue_setup,
.rx_queue_release = dpaa_eth_rx_queue_release,
.tx_queue_release = dpaa_eth_tx_queue_release,
+ .rx_queue_count = dpaa_dev_rx_queue_count,
.flow_ctrl_get = dpaa_flow_ctrl_get,
.flow_ctrl_set = dpaa_flow_ctrl_set,
@@ -692,6 +866,45 @@ static struct eth_dev_ops dpaa_devops = {
.fw_version_get = dpaa_fw_version_get,
};
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
+{
+ if (strcmp(dev->device->driver->name,
+ drv->driver.name))
+ return false;
+
+ return true;
+}
+
+static bool
+is_dpaa_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_dpaa_pmd);
+}
+
+int __rte_experimental
+rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct dpaa_if *dpaa_intf;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_dpaa_supported(dev))
+ return -ENOTSUP;
+
+ dpaa_intf = dev->data->dev_private;
+
+ if (on)
+ fman_if_loopback_enable(dpaa_intf->fif);
+ else
+ fman_if_loopback_disable(dpaa_intf->fif);
+
+ return 0;
+}
+
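A usage sketch for the new experimental loopback control (port 0 is assumed to be a DPAA port, and the application must be built with experimental APIs allowed):

	if (rte_pmd_dpaa_set_tx_loopback(0, 1) == 0) {
		/* ... run self-test traffic ... */
		rte_pmd_dpaa_set_tx_loopback(0, 0);
	}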
static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
{
struct rte_eth_fc_conf *fc_conf;
@@ -720,11 +933,21 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
}
/* Initialise an Rx FQ */
-static int dpaa_rx_queue_init(struct qman_fq *fq,
+static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
uint32_t fqid)
{
- struct qm_mcc_initfq opts;
+ struct qm_mcc_initfq opts = {0};
int ret;
+ u32 flags = 0;
+ struct qm_mcc_initcgr cgr_opts = {
+ .we_mask = QM_CGR_WE_CS_THRES |
+ QM_CGR_WE_CSTD_EN |
+ QM_CGR_WE_MODE,
+ .cgr = {
+ .cstd_en = QM_CGR_EN,
+ .mode = QMAN_CGR_MODE_FRAME
+ }
+ };
PMD_INIT_FUNC_TRACE();
@@ -742,24 +965,28 @@ static int dpaa_rx_queue_init(struct qman_fq *fq,
fqid, ret);
return ret;
}
+ fq->is_static = false;
- opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
- QM_INITFQ_WE_CONTEXTA;
-
- opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
- opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
- QM_FQCTRL_PREFERINCACHE;
- opts.fqd.context_a.stashing.exclusive = 0;
- opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
- opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
- opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
+ dpaa_poll_queue_default_config(&opts);
- /*Enable tail drop */
- opts.we_mask = opts.we_mask | QM_INITFQ_WE_TDTHRESH;
- opts.fqd.fq_ctrl = opts.fqd.fq_ctrl | QM_FQCTRL_TDE;
- qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);
-
- ret = qman_init_fq(fq, 0, &opts);
+ if (cgr_rx) {
+ /* Enable tail drop with cgr on this queue */
+ qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
+ cgr_rx->cb = NULL;
+ ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
+ &cgr_opts);
+ if (ret) {
+ DPAA_PMD_WARN(
+ "rx taildrop init fail on rx fqid %d (ret=%d)",
+ fqid, ret);
+ goto without_cgr;
+ }
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = cgr_rx->cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+without_cgr:
+ ret = qman_init_fq(fq, flags, &opts);
if (ret)
DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
return ret;
@@ -769,7 +996,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq,
static int dpaa_tx_queue_init(struct qman_fq *fq,
struct fman_if *fman_intf)
{
- struct qm_mcc_initfq opts;
+ struct qm_mcc_initfq opts = {0};
int ret;
PMD_INIT_FUNC_TRACE();
@@ -800,7 +1027,7 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
{
- struct qm_mcc_initfq opts;
+ struct qm_mcc_initfq opts = {0};
int ret;
PMD_INIT_FUNC_TRACE();
@@ -841,6 +1068,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
struct fm_eth_port_cfg *cfg;
struct fman_if *fman_intf;
struct fman_if_bpool *bp, *tmp_bp;
+ uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
PMD_INIT_FUNC_TRACE();
@@ -867,6 +1095,16 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
else
num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+	/* Check if push mode queues are to be enabled. Currently we allow
+	 * only one queue per thread.
+ */
+ if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
+ dpaa_push_mode_max_queue =
+ atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
+ if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
+ dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+ }
+
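The environment override is clamped against the compile-time maximum; the effective behaviour, sketched with the names defined above:

	/* DPAA_PUSH_QUEUES_NUMBER=0  -> push mode disabled
	 * DPAA_PUSH_QUEUES_NUMBER=2  -> first two RX queues use push mode
	 * DPAA_PUSH_QUEUES_NUMBER=99 -> clamped to DPAA_MAX_PUSH_MODE_QUEUE (4)
	 */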
/* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX
* queues.
*/
@@ -877,28 +1115,62 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
dpaa_intf->rx_queues = rte_zmalloc(NULL,
sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+ if (!dpaa_intf->rx_queues) {
+ DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
+ return -ENOMEM;
+ }
+
+	/* If congestion control is enabled globally */
+ if (td_threshold) {
+ dpaa_intf->cgr_rx = rte_zmalloc(NULL,
+ sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
+ if (!dpaa_intf->cgr_rx) {
+ DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
+ ret = -ENOMEM;
+ goto free_rx;
+ }
+
+ ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
+ if (ret != num_rx_fqs) {
+ DPAA_PMD_WARN("insufficient CGRIDs available");
+ ret = -EINVAL;
+ goto free_rx;
+ }
+ } else {
+ dpaa_intf->cgr_rx = NULL;
+ }
+
for (loop = 0; loop < num_rx_fqs; loop++) {
fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
DPAA_PCD_FQID_MULTIPLIER + loop;
- ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);
+
+ if (dpaa_intf->cgr_rx)
+ dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
+
+ ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
+ dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
+ fqid);
if (ret)
- return ret;
+ goto free_rx;
dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
}
dpaa_intf->nb_rx_queues = num_rx_fqs;
- /* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
+	/* Initialise Tx FQs. Have as many Tx FQs as the number of cores */
num_cores = rte_lcore_count();
dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
num_cores, MAX_CACHELINE);
- if (!dpaa_intf->tx_queues)
- return -ENOMEM;
+ if (!dpaa_intf->tx_queues) {
+ DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
+ ret = -ENOMEM;
+ goto free_rx;
+ }
for (loop = 0; loop < num_cores; loop++) {
ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
fman_intf);
if (ret)
- return ret;
+ goto free_tx;
dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
}
dpaa_intf->nb_tx_queues = num_cores;
@@ -935,13 +1207,8 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
"store MAC addresses",
ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
- rte_free(dpaa_intf->rx_queues);
- rte_free(dpaa_intf->tx_queues);
- dpaa_intf->rx_queues = NULL;
- dpaa_intf->tx_queues = NULL;
- dpaa_intf->nb_rx_queues = 0;
- dpaa_intf->nb_tx_queues = 0;
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_tx;
}
/* copy the primary mac address */
@@ -967,12 +1234,25 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
fman_if_stats_reset(fman_intf);
return 0;
+
+free_tx:
+ rte_free(dpaa_intf->tx_queues);
+ dpaa_intf->tx_queues = NULL;
+ dpaa_intf->nb_tx_queues = 0;
+
+free_rx:
+ rte_free(dpaa_intf->cgr_rx);
+ rte_free(dpaa_intf->rx_queues);
+ dpaa_intf->rx_queues = NULL;
+ dpaa_intf->nb_rx_queues = 0;
+ return ret;
}
static int
dpaa_dev_uninit(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ int loop;
PMD_INIT_FUNC_TRACE();
@@ -990,6 +1270,18 @@ dpaa_dev_uninit(struct rte_eth_dev *dev)
if (dpaa_intf->fc_conf)
rte_free(dpaa_intf->fc_conf);
+ /* Release RX congestion Groups */
+ if (dpaa_intf->cgr_rx) {
+ for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
+ qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
+
+		qman_release_cgrid_range(dpaa_intf->cgr_rx[0].cgrid,
+ dpaa_intf->nb_rx_queues);
+ }
+
+ rte_free(dpaa_intf->cgr_rx);
+ dpaa_intf->cgr_rx = NULL;
+
rte_free(dpaa_intf->rx_queues);
dpaa_intf->rx_queues = NULL;
@@ -1046,10 +1338,12 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
is_global_init = 1;
}
- ret = rte_dpaa_portal_init((void *)1);
- if (ret) {
- DPAA_PMD_ERR("Unable to initialize portal");
- return ret;
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)1);
+ if (ret) {
+ DPAA_PMD_ERR("Unable to initialize portal");
+ return ret;
+ }
}
eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 5457d61b..c051ae32 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -1,41 +1,16 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DPAA_ETHDEV_H__
#define __DPAA_ETHDEV_H__
/* System headers */
#include <stdbool.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_event_eth_rx_adapter.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
@@ -43,11 +18,6 @@
#include <of.h>
#include <netcfg.h>
-/* DPAA SoC identifier; If this is not available, it can be concluded
- * that board is non-DPAA. Single slot is currently supported.
- */
-#define DPAA_SOC_ID_FILE "sys/devices/soc0/soc_id"
-
#define DPAA_MBUF_HW_ANNOTATION 64
#define DPAA_FD_PTA_SIZE 64
@@ -55,6 +25,13 @@
#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
#endif
+/* mbuf->seqn is used to store the event entry index for
+ * driver-specific usage. Parallel mode queues set an invalid
+ * index, while atomic mode queues set a valid value in the
+ * range 1 to 16.
+ */
+#define DPAA_INVALID_MBUF_SEQN 0
+
/* we will re-use the HEADROOM for annotation in RX */
#define DPAA_HW_BUF_RESERVE 0
#define DPAA_PACKET_LAYOUT_ALIGN 64
@@ -65,24 +42,27 @@
#define DPAA_MIN_RX_BUF_SIZE 512
#define DPAA_MAX_RX_PKT_LEN 10240
-/* RX queue tail drop threshold
- * currently considering 32 KB packets.
- */
-#define CONG_THRESHOLD_RX_Q (32 * 1024)
+/* RX queue tail drop threshold (CGR Based) in frame count */
+#define CGR_RX_PERFQ_THRESH 256
/*max mac filter for memac(8) including primary mac addr*/
#define DPAA_MAX_MAC_FILTER (MEMAC_NUM_OF_PADDRS + 1)
/*Maximum number of slots available in TX ring*/
-#define MAX_TX_RING_SLOTS 8
+#define DPAA_TX_BURST_SIZE 7
+
+#ifndef VLAN_TAG_SIZE
+#define VLAN_TAG_SIZE   4 /**< VLAN tag length */
+#endif
/* PCD frame queues */
#define DPAA_PCD_FQID_START 0x400
#define DPAA_PCD_FQID_MULTIPLIER 0x100
#define DPAA_DEFAULT_NUM_PCD_QUEUES 1
+#define DPAA_MAX_NUM_PCD_QUEUES 32
#define DPAA_IF_TX_PRIORITY 3
-#define DPAA_IF_RX_PRIORITY 4
+#define DPAA_IF_RX_PRIORITY 0
#define DPAA_IF_DEBUG_PRIORITY 7
#define DPAA_IF_RX_ANNOTATION_STASH 1
@@ -129,6 +109,7 @@ struct dpaa_if {
char *name;
const struct fm_eth_port_cfg *cfg;
struct qman_fq *rx_queues;
+ struct qman_cgr *cgr_rx;
struct qman_fq *tx_queues;
struct qman_fq debug_queues[2];
uint16_t nb_rx_queues;
@@ -179,4 +160,25 @@ struct dpaa_if_stats {
uint64_t tund; /**<Tx Undersized */
};
+int __rte_experimental dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ u16 ch_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+
+int __rte_experimental dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id);
+
+enum qman_cb_dqrr_result
+dpaa_rx_cb_parallel(void *event,
+ struct qman_portal *qm __always_unused,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bufs);
+enum qman_cb_dqrr_result
+dpaa_rx_cb_atomic(void *event,
+ struct qman_portal *qm __always_unused,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bufs);
+
#endif
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 41e57f2e..0dea8e79 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* System headers */
@@ -52,18 +26,21 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
+#include <rte_net.h>
+#include <rte_eventdev.h>
#include "dpaa_ethdev.h"
#include "dpaa_rxtx.h"
#include <rte_dpaa_bus.h>
#include <dpaa_mempool.h>
+#include <qman.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
@@ -122,12 +99,6 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
switch (prs) {
- case DPAA_PKT_TYPE_NONE:
- m->packet_type = 0;
- break;
- case DPAA_PKT_TYPE_ETHER:
- m->packet_type = RTE_PTYPE_L2_ETHER;
- break;
case DPAA_PKT_TYPE_IPV4:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4;
@@ -136,6 +107,9 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6;
break;
+ case DPAA_PKT_TYPE_ETHER:
+ m->packet_type = RTE_PTYPE_L2_ETHER;
+ break;
case DPAA_PKT_TYPE_IPV4_FRAG:
case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
@@ -198,6 +172,9 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
break;
+ case DPAA_PKT_TYPE_NONE:
+ m->packet_type = 0;
+ break;
/* More switch cases can be added */
default:
dpaa_slow_parsing(m, prs);
@@ -208,12 +185,11 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
<< DPAA_PKT_L3_LEN_SHIFT;
/* Set the hash values */
- m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
- m->ol_flags = PKT_RX_RSS_HASH;
+ m->hash.rss = (uint32_t)(annot->hash);
/* All packets with Bad checksum are dropped by interface (and
* corresponding notification issued to RX error queues).
*/
- m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD;
/* Check if Vlan is present */
if (prs & DPAA_PARSE_VLAN_MASK)
@@ -297,8 +273,32 @@ static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
}
+static inline void
+dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr)
+{
+ if (!mbuf->packet_type) {
+ struct rte_net_hdr_lens hdr_lens;
+
+ mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
+ RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
+ | RTE_PTYPE_L4_MASK);
+ mbuf->l2_len = hdr_lens.l2_len;
+ mbuf->l3_len = hdr_lens.l3_len;
+ }
+ if (mbuf->data_off < (DEFAULT_TX_ICEOF +
+ sizeof(struct dpaa_eth_parse_results_t))) {
+		DPAA_DP_LOG(DEBUG, "Checksum offload error: not enough "
+			"headroom for correct checksum offload, "
+			"so calculating the checksum in software.");
+ dpaa_checksum(mbuf);
+ } else {
+ dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
+ }
+}
+
struct rte_mbuf *
-dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
+dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
@@ -309,7 +309,7 @@ dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
DPAA_DP_LOG(DEBUG, "Received an SG frame");
- vaddr = rte_dpaa_mem_ptov(qm_fd_addr(fd));
+ vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
if (!vaddr) {
DPAA_PMD_ERR("unable to convert physical address");
return NULL;
@@ -318,7 +318,7 @@ dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
sg_temp = &sgt[i++];
hw_sg_to_cpu(sg_temp);
temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
- sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
+ sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp));
first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
bp_info->meta_data_size);
@@ -334,7 +334,8 @@ dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
while (i < DPAA_SGT_MAX_ENTRIES) {
sg_temp = &sgt[i++];
hw_sg_to_cpu(sg_temp);
- sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
+ sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
+ qm_sg_entry_get64(sg_temp));
cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
bp_info->meta_data_size);
cur_seg->data_off = sg_temp->offset;
@@ -356,34 +357,33 @@ dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
return first_seg;
}
-static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
- uint32_t ifid)
+static inline struct rte_mbuf *
+dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
- struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
struct rte_mbuf *mbuf;
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
void *ptr;
uint8_t format =
(fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
- uint16_t offset =
- (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
- uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;
+ uint16_t offset;
+ uint32_t length;
DPAA_DP_LOG(DEBUG, " FD--->MBUF");
if (unlikely(format == qm_fd_sg))
return dpaa_eth_sg_to_mbuf(fd, ifid);
+ ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
+
+ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+
+ offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
+ length = fd->opaque & DPAA_FD_LENGTH_MASK;
+
/* Ignoring case when format != qm_fd_contig */
dpaa_display_frame(fd);
- ptr = rte_dpaa_mem_ptov(fd->addr);
- /* Ignoring case when ptr would be NULL. That is only possible incase
- * of a corrupted packet
- */
mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
- /* Prefetch the Parse results and packet data to L1 */
- rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
- rte_prefetch0((void *)((uint8_t *)ptr + offset));
mbuf->data_off = offset;
mbuf->data_len = length;
@@ -399,6 +399,161 @@ static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
return mbuf;
}
+void
+dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
+ void **bufs, int num_bufs)
+{
+ struct rte_mbuf *mbuf;
+ struct dpaa_bp_info *bp_info;
+ const struct qm_fd *fd;
+ void *ptr;
+ struct dpaa_if *dpaa_intf;
+ uint16_t offset, i;
+ uint32_t length;
+ uint8_t format;
+
+ if (dpaa_svr_family != SVR_LS1046A_FAMILY) {
+ bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid);
+ ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd));
+ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+ bufs[0] = (struct rte_mbuf *)((char *)ptr -
+ bp_info->meta_data_size);
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+ if (dpaa_svr_family != SVR_LS1046A_FAMILY &&
+ i < num_bufs - 1) {
+ bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid);
+ ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd));
+ rte_prefetch0((void *)((uint8_t *)ptr +
+ DEFAULT_RX_ICEOF));
+ bufs[i + 1] = (struct rte_mbuf *)((char *)ptr -
+ bp_info->meta_data_size);
+ }
+
+ fd = &dqrr[i]->fd;
+ dpaa_intf = fq[i]->dpaa_intf;
+
+ format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
+ DPAA_FD_FORMAT_SHIFT;
+ if (unlikely(format == qm_fd_sg)) {
+ bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
+ continue;
+ }
+
+ offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
+ DPAA_FD_OFFSET_SHIFT;
+ length = fd->opaque & DPAA_FD_LENGTH_MASK;
+
+ mbuf = bufs[i];
+ mbuf->data_off = offset;
+ mbuf->data_len = length;
+ mbuf->pkt_len = length;
+ mbuf->port = dpaa_intf->ifid;
+
+ mbuf->nb_segs = 1;
+ mbuf->ol_flags = 0;
+ mbuf->next = NULL;
+ rte_mbuf_refcnt_set(mbuf, 1);
+ dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
+ }
+}
+
+void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
+{
+ struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid);
+ void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));
+
+ /* On LS1046, annotation stashing is disabled because the L2 cache
+ * becomes a bottleneck in the multicore scenario on this platform.
+ * So we prefetch the annotation beforehand, so that it is available
+ * in cache when accessed.
+ */
+ if (dpaa_svr_family == SVR_LS1046A_FAMILY)
+ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
+
+ *bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
+}
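
The same look-ahead idea in isolation: prefetch the next item's annotation while the current one is being processed. A sketch only; items, n, ANNOT_OFF and process() are illustrative names, not driver symbols:

    #include <stdint.h>
    #include <rte_prefetch.h>

    #define ANNOT_OFF 64             /* illustrative annotation offset */
    extern void process(void *item); /* illustrative consumer */

    static void walk(void **items, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (i + 1 < n)
                            rte_prefetch0((uint8_t *)items[i + 1] + ANNOT_OFF);
                    process(items[i]);  /* item i's annotation is now warm */
            }
    }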
+
+static uint16_t
+dpaa_eth_queue_portal_rx(struct qman_fq *fq,
+ struct rte_mbuf **bufs,
+ uint16_t nb_bufs)
+{
+ int ret;
+
+ if (unlikely(fq->qp == NULL)) {
+ ret = rte_dpaa_portal_fq_init((void *)0, fq);
+ if (ret) {
+ DPAA_PMD_ERR("Failure in affining portal %d", ret);
+ return 0;
+ }
+ }
+
+ return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp);
+}
+
+enum qman_cb_dqrr_result
+dpaa_rx_cb_parallel(void *event,
+ struct qman_portal *qm __always_unused,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bufs)
+{
+ u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
+ struct rte_mbuf *mbuf;
+ struct rte_event *ev = (struct rte_event *)event;
+
+ mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
+ ev->event_ptr = (void *)mbuf;
+ ev->flow_id = fq->ev.flow_id;
+ ev->sub_event_type = fq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = fq->ev.sched_type;
+ ev->queue_id = fq->ev.queue_id;
+ ev->priority = fq->ev.priority;
+ ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN;
+ mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
+ *bufs = mbuf;
+
+ return qman_cb_dqrr_consume;
+}
+
+enum qman_cb_dqrr_result
+dpaa_rx_cb_atomic(void *event,
+ struct qman_portal *qm __always_unused,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ void **bufs)
+{
+ u8 index;
+ u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
+ struct rte_mbuf *mbuf;
+ struct rte_event *ev = (struct rte_event *)event;
+
+ mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
+ ev->event_ptr = (void *)mbuf;
+ ev->flow_id = fq->ev.flow_id;
+ ev->sub_event_type = fq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = fq->ev.sched_type;
+ ev->queue_id = fq->ev.queue_id;
+ ev->priority = fq->ev.priority;
+
+ /* Save active dqrr entries */
+ index = DQRR_PTR2IDX(dqrr);
+ DPAA_PER_LCORE_DQRR_SIZE++;
+ DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
+ DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf;
+ ev->impl_opaque = index + 1;
+ mbuf->seqn = (uint32_t)index + 1;
+ *bufs = mbuf;
+
+ return qman_cb_dqrr_defer;
+}
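
The per-lcore DQRR bookkeeping introduced here is consumed again on the TX side below (where flags[loop] gets QMAN_ENQUEUE_FLAG_DCA), and boils down to a small bitmask protocol: RX defers consuming a DQRR slot, TX acknowledges it atomically via the enqueue. A reduced sketch, with plain variables standing in for the DPAA_PER_LCORE_DQRR_* macros:

    #include <stdint.h>

    static uint32_t dqrr_held; /* bit i set => DQRR slot i still unconsumed */
    static int dqrr_size;      /* number of held slots */

    static void hold_slot(uint8_t idx)
    {
            dqrr_held |= 1u << idx;
            dqrr_size++;
    }

    /* Returns 1 if the caller should tag the enqueue with the DCA flag. */
    static int release_slot(uint8_t idx)
    {
            if (!(dqrr_held & (1u << idx)))
                    return 0;
            dqrr_held &= ~(1u << idx);
            dqrr_size--;
            return 1;
    }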
+
uint16_t dpaa_eth_queue_rx(void *q,
struct rte_mbuf **bufs,
uint16_t nb_bufs)
@@ -408,10 +563,15 @@ uint16_t dpaa_eth_queue_rx(void *q,
uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
int ret;
- ret = rte_dpaa_portal_init((void *)0);
- if (ret) {
- DPAA_PMD_ERR("Failure in affining portal");
- return 0;
+ if (likely(fq->is_static))
+ return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);
+
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
}
ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
@@ -445,7 +605,8 @@ static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
(uint64_t)bufs.addr, bufs.bpid);
- buf = (uint64_t)rte_dpaa_mem_ptov(bufs.addr) - bp_info->meta_data_size;
+ buf = (uint64_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr)
+ - bp_info->meta_data_size;
if (!buf)
goto out;
@@ -463,11 +624,11 @@ static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,
if (!dpaa_mbuf)
return NULL;
- memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *)
+ memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + RTE_PKTMBUF_HEADROOM, (void *)
((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);
/* Copy only the required fields */
- dpaa_mbuf->data_off = mbuf->data_off;
+ dpaa_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
dpaa_mbuf->pkt_len = mbuf->pkt_len;
dpaa_mbuf->ol_flags = mbuf->ol_flags;
dpaa_mbuf->packet_type = mbuf->packet_type;
@@ -504,6 +665,15 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
fd->opaque_addr = 0;
if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
+ if (!mbuf->packet_type) {
+ struct rte_net_hdr_lens hdr_lens;
+
+ mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
+ RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
+ | RTE_PTYPE_L4_MASK);
+ mbuf->l2_len = hdr_lens.l2_len;
+ mbuf->l3_len = hdr_lens.l3_len;
+ }
if (temp->data_off < DEFAULT_TX_ICEOF
+ sizeof(struct dpaa_eth_parse_results_t))
temp->data_off = DEFAULT_TX_ICEOF
@@ -610,18 +780,8 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
rte_pktmbuf_free(mbuf);
}
- if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
- if (mbuf->data_off < (DEFAULT_TX_ICEOF +
- sizeof(struct dpaa_eth_parse_results_t))) {
- DPAA_DP_LOG(DEBUG, "Checksum offload Err: "
- "Not enough Headroom "
- "space for correct Checksum offload."
- "So Calculating checksum in Software.");
- dpaa_checksum(mbuf);
- } else {
- dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
- }
- }
+ if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
+ dpaa_unsegmented_checksum(mbuf, fd_arr);
}
/* Handle all mbufs on dpaa BMAN managed pool */
@@ -665,7 +825,7 @@ tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
return 1;
}
- DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, dpaa_intf->bp_info->bpid);
+ DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);
return 0;
}
@@ -676,25 +836,42 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
struct rte_mbuf *mbuf, *mi = NULL;
struct rte_mempool *mp;
struct dpaa_bp_info *bp_info;
- struct qm_fd fd_arr[MAX_TX_RING_SLOTS];
- uint32_t frames_to_send, loop, i = 0;
+ struct qm_fd fd_arr[DPAA_TX_BURST_SIZE];
+ uint32_t frames_to_send, loop, sent = 0;
uint16_t state;
int ret;
+ uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
- ret = rte_dpaa_portal_init((void *)0);
- if (ret) {
- DPAA_PMD_ERR("Failure in affining portal");
- return 0;
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
}
DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);
while (nb_bufs) {
- frames_to_send = (nb_bufs >> 3) ? MAX_TX_RING_SLOTS : nb_bufs;
- for (loop = 0; loop < frames_to_send; loop++, i++) {
- mbuf = bufs[i];
- if (RTE_MBUF_DIRECT(mbuf)) {
+ frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ?
+ DPAA_TX_BURST_SIZE : nb_bufs;
+ for (loop = 0; loop < frames_to_send; loop++) {
+ mbuf = *(bufs++);
+ if (likely(RTE_MBUF_DIRECT(mbuf))) {
mp = mbuf->pool;
+ bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+ if (likely(mp->ops_index ==
+ bp_info->dpaa_ops_index &&
+ mbuf->nb_segs == 1 &&
+ rte_mbuf_refcnt_read(mbuf) == 1)) {
+ DPAA_MBUF_TO_CONTIG_FD(mbuf,
+ &fd_arr[loop], bp_info->bpid);
+ if (mbuf->ol_flags &
+ DPAA_TX_CKSUM_OFFLOAD_MASK)
+ dpaa_unsegmented_checksum(mbuf,
+ &fd_arr[loop]);
+ continue;
+ }
} else {
mi = rte_mbuf_from_indirect(mbuf);
mp = mi->pool;
@@ -726,20 +903,34 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
goto send_pkts;
}
}
+ seqn = mbuf->seqn;
+ if (seqn != DPAA_INVALID_MBUF_SEQN) {
+ index = seqn - 1;
+ if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
+ flags[loop] =
+ ((index & QM_EQCR_DCA_IDXMASK) << 8);
+ flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
+ DPAA_PER_LCORE_DQRR_SIZE--;
+ DPAA_PER_LCORE_DQRR_HELD &=
+ ~(1 << index);
+ }
+ }
}
send_pkts:
loop = 0;
while (loop < frames_to_send) {
loop += qman_enqueue_multi(q, &fd_arr[loop],
- frames_to_send - loop);
+ &flags[loop],
+ frames_to_send - loop);
}
nb_bufs -= frames_to_send;
+ sent += frames_to_send;
}
- DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", i, q);
+ DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);
- return i;
+ return sent;
}
uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h
index 2ffc4ffe..d3e63516 100644
--- a/drivers/net/dpaa/dpaa_rxtx.h
+++ b/drivers/net/dpaa/dpaa_rxtx.h
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP.
+ * Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DPDK_RXTX_H__
@@ -288,10 +262,14 @@ uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
struct rte_mbuf **bufs __rte_unused,
uint16_t nb_bufs __rte_unused);
-struct rte_mbuf *dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid);
+struct rte_mbuf *dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid);
int dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
struct qm_fd *fd,
uint32_t bpid);
+void dpaa_rx_cb(struct qman_fq **fq,
+ struct qm_dqrr_entry **dqrr, void **bufs, int num_bufs);
+
+void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs);
#endif
diff --git a/drivers/net/dpaa/rte_pmd_dpaa.h b/drivers/net/dpaa/rte_pmd_dpaa.h
new file mode 100644
index 00000000..38405ec0
--- /dev/null
+++ b/drivers/net/dpaa/rte_pmd_dpaa.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _PMD_DPAA_H_
+#define _PMD_DPAA_H_
+
+/**
+ * @file rte_pmd_dpaa.h
+ *
+ * NXP dpaa PMD specific functions.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ */
+
+#include <rte_ethdev_driver.h>
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Enable/Disable TX loopback
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable TX loopback.
+ * 0 - Disable TX loopback.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int __rte_experimental
+rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on);
+
+#endif /* _PMD_DPAA_H_ */
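
A typical call site for this new API, assuming an application that has already configured the port (port_id and the helper name are the caller's, shown for illustration):

    #include <stdio.h>
    #include <rte_pmd_dpaa.h>

    static void enable_loopback(uint8_t port_id)
    {
            int ret = rte_pmd_dpaa_set_tx_loopback(port_id, 1);

            if (ret < 0)
                    printf("TX loopback enable failed: %d\n", ret);
    }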
diff --git a/drivers/net/dpaa/rte_pmd_dpaa_version.map b/drivers/net/dpaa/rte_pmd_dpaa_version.map
index a70bd197..3b937b10 100644
--- a/drivers/net/dpaa/rte_pmd_dpaa_version.map
+++ b/drivers/net/dpaa/rte_pmd_dpaa_version.map
@@ -2,3 +2,13 @@ DPDK_17.11 {
local: *;
};
+
+EXPERIMENTAL {
+ global:
+
+ dpaa_eth_eventq_attach;
+ dpaa_eth_eventq_detach;
+ rte_pmd_dpaa_set_tx_loopback;
+
+ local: *;
+} DPDK_17.11;
diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
index ee9b2cce..5a93a0b9 100644
--- a/drivers/net/dpaa2/Makefile
+++ b/drivers/net/dpaa2/Makefile
@@ -1,33 +1,7 @@
-# BSD LICENSE
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2016 NXP
#
-# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
-# Copyright 2016 NXP.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Freescale Semiconductor, Inc nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -51,6 +25,7 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
# versioning export map
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index e3ab90ac..b93376de 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -1,41 +1,15 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <time.h>
#include <net/if.h>
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
index e68febff..779cdf2b 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
@@ -63,7 +37,7 @@ struct dpaa2_fas {
uint8_t ppid;
__le16 ifpid;
__le32 status;
-} __packed;
+} __attribute__((__packed__));
/**
* HW Packet Annotation Register structures
@@ -219,6 +193,26 @@ struct dpaa2_annot_hdr {
#define L5_SOFT_PARSING_ERROR BIT_POS(1)
#define L3_IPV6_ROUTE_HDR_PRESENT BIT_POS(0)
+#define DPAA2_L3_IPv4 (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \
+ L3_IP_1_UNKNOWN_PROTOCOL | L3_IP_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv6 (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \
+ L3_IP_1_UNKNOWN_PROTOCOL | L3_IP_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv4_TCP (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \
+ L3_PROTO_TCP_PRESENT | L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT | \
+ L4_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv4_UDP (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \
+ L3_PROTO_UDP_PRESENT | L4_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv6_TCP (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \
+ L3_PROTO_TCP_PRESENT | L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT | \
+ L4_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv6_UDP (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \
+ L3_PROTO_UDP_PRESENT | L4_UNKNOWN_PROTOCOL)
+
/* Debug frame, otherwise supposed to be discarded */
#define DPAA2_ETH_FAS_DISC 0x80000000
/* MACSEC frame */
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 202f84f0..09a11d65 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1,41 +1,15 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <time.h>
#include <net/if.h>
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -51,6 +25,7 @@
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
+#include <fsl_qbman_debug.h>
struct rte_dpaa2_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -172,6 +147,12 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
PMD_INIT_FUNC_TRACE();
if (mask & ETH_VLAN_FILTER_MASK) {
+ /* VLAN filter not available */
+ if (!priv->max_vlan_filters) {
+ RTE_LOG(INFO, PMD, "VLAN filter not available\n");
+ goto next_mask;
+ }
+
if (dev->data->dev_conf.rxmode.hw_vlan_filter)
ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
priv->token, true);
@@ -182,7 +163,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
ret);
}
-
+next_mask:
if (mask & ETH_VLAN_EXTEND_MASK) {
if (dev->data->dev_conf.rxmode.hw_vlan_extend)
RTE_LOG(INFO, PMD,
@@ -346,8 +327,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
if (eth_conf->rxmode.jumbo_frame == 1) {
if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
- ret = dpaa2_dev_mtu_set(dev,
- eth_conf->rxmode.max_rx_pkt_len);
+ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
+ priv->token, eth_conf->rxmode.max_rx_pkt_len);
if (ret) {
PMD_INIT_LOG(ERR,
"unable to set mtu. check config\n");
@@ -399,6 +380,25 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+ /* Enabling hash results in the FD requires setting DPNI_FLCTYPE_HASH
+ * in the dpni_set_offload API. Setting this FLCTYPE for a DPNI sets
+ * FD[SC] to 0 for LS2 in the hardware, thus disabling data/annotation
+ * stashing. For LX2 this is fixed in hardware, and thus the hash
+ * result and parse results can be received in the FD using this
+ * option.
+ */
+ if (dpaa2_svr_family == SVR_LX2160A) {
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_FLCTYPE_HASH, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error setting FLCTYPE: Err = %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (eth_conf->rxmode.hw_vlan_filter)
+ dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+
/* update the current status */
dpaa2_dev_link_update(dev, 0);
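
Downstream, hash results delivered in the FD are consumed through the generic mbuf fields. A consumer-side sketch; it assumes the PMD sets PKT_RX_RSS_HASH (not shown in this hunk), and nb_workers is application-defined:

    #include <rte_mbuf.h>

    static uint32_t pick_worker(const struct rte_mbuf *m, uint32_t nb_workers)
    {
            if (m->ol_flags & PKT_RX_RSS_HASH)
                    return m->hash.rss % nb_workers;
            return 0;       /* no hash available: fall back to worker 0 */
    }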
@@ -418,7 +418,6 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- struct mc_soc_version mc_plat_info = {0};
struct dpaa2_queue *dpaa2_q;
struct dpni_queue cfg;
uint8_t options = 0;
@@ -450,18 +449,20 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
/*if ls2088 or rev2 device, enable the stashing */
- if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
- PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");
-
- if ((mc_plat_info.svr & 0xffff0000) != SVR_LS2080A) {
+ if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
options |= DPNI_QUEUE_OPT_FLC;
cfg.flc.stash_control = true;
cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
/* 00 00 00 - last 6 bits represent annotation, context stashing,
- * data stashing setting 01 01 00 (0x14) to enable
- * 1 line data, 1 line annotation
+ * data stashing setting 01 01 00 (0x14)
+ * (in the following order: DS AS CS)
+ * to enable 1 line data, 1 line annotation.
+ * For LX2, this setting should be 01 00 00 (0x10)
*/
- cfg.flc.value |= 0x14;
+ if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
+ cfg.flc.value |= 0x10;
+ else
+ cfg.flc.value |= 0x14;
}
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
dpaa2_q->tc_index, flow_id, options, &cfg);
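
The two magic values decode as follows, reading the comment's bit order (two bits each for DS, AS, CS, most significant first; a sketch of my reading, not a driver macro):

    /* Two bits each for data (DS), annotation (AS) and context (CS) stash. */
    #define STASH_CFG(ds, as, cs)  (((ds) << 4) | ((as) << 2) | (cs))

    /* STASH_CFG(1, 1, 0) == 0x14 : 1 line data + 1 line annotation */
    /* STASH_CFG(1, 0, 0) == 0x10 : LX2, data stash only */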
@@ -594,6 +595,37 @@ dpaa2_dev_tx_queue_release(void *q __rte_unused)
PMD_INIT_FUNC_TRACE();
}
+static uint32_t
+dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ int32_t ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_queue *dpaa2_q;
+ struct qbman_swp *swp;
+ struct qbman_fq_query_np_rslt state;
+ uint32_t frame_cnt = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ return -EINVAL;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
+
+ if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
+ frame_cnt = qbman_fq_state_frame_count(&state);
+ RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
+ rx_queue_id, frame_cnt);
+ }
+ return frame_cnt;
+}
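
This new op backs the generic ethdev query, so applications keep using the portable call (port_id is the caller's variable):

    #include <rte_ethdev.h>

    static int rx_backlog(uint16_t port_id)
    {
            return rte_eth_rx_queue_count(port_id, 0 /* queue 0 */);
    }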
+
static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
@@ -655,7 +687,7 @@ dpaa2_interrupt_handler(void *param)
dpaa2_dev_link_update(dev, 0);
/* calling all the apps registered for link status event */
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
out:
ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
@@ -757,16 +789,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
"code = %d\n", ret);
return ret;
}
- /* VLAN Offload Settings */
- if (priv->max_vlan_filters) {
- ret = dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
- if (ret) {
- PMD_INIT_LOG(ERR, "Error to dpaa2_vlan_offload_set:"
- "code = %d\n", ret);
- return ret;
- }
- }
-
/* if the interrupts were configured on this devices*/
if (intr_handle && (intr_handle->fd) &&
@@ -967,7 +989,8 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + VLAN_TAG_SIZE;
PMD_INIT_FUNC_TRACE();
@@ -985,11 +1008,13 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
else
dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
/* Set the Max Rx frame length as 'mtu' +
* Maximum Ethernet header length
*/
ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
- mtu + ETH_VLAN_HLEN);
+ frame_size);
if (ret) {
PMD_DRV_LOG(ERR, "setting the max frame length failed");
return -1;
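
Worked example of the new sizing: for a standard 1500-byte MTU,

    /* frame_size for MTU 1500:
     *   1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) + 4 (VLAN_TAG_SIZE)
     *   = 1522 bytes, the classic 802.1Q maximum frame size.
     */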
@@ -1660,6 +1685,8 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
+ else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
+ dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
else
return -EINVAL;
@@ -1669,6 +1696,11 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
cfg.destination.id = dpcon_id;
cfg.destination.priority = queue_conf->ev.priority;
+ if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
+ cfg.destination.hold_active = 1;
+ }
+
options |= DPNI_QUEUE_OPT_USER_CTX;
cfg.user_context = (uint64_t)(dpaa2_ethq);
@@ -1736,6 +1768,7 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
.rx_queue_release = dpaa2_dev_rx_queue_release,
.tx_queue_setup = dpaa2_dev_tx_queue_setup,
.tx_queue_release = dpaa2_dev_tx_queue_release,
+ .rx_queue_count = dpaa2_dev_rx_queue_count,
.flow_ctrl_get = dpaa2_flow_ctrl_get,
.flow_ctrl_set = dpaa2_flow_ctrl_set,
.mac_addr_add = dpaa2_dev_add_mac_addr,
@@ -1872,7 +1905,6 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
}
eth_dev->dev_ops = &dpaa2_ethdev_ops;
- eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
@@ -1976,6 +2008,9 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
dpaa2_dev->eth_dev = eth_dev;
eth_dev->data->rx_mbuf_alloc_failed = 0;
+ if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+
/* Invoke PMD device initialization function */
diag = dpaa2_dev_init(eth_dev);
if (diag == 0)
@@ -2003,6 +2038,7 @@ rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
}
static struct rte_dpaa2_driver rte_dpaa2_pmd = {
+ .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
.drv_type = DPAA2_ETH,
.probe = rte_dpaa2_probe,
.remove = rte_dpaa2_remove,
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index b8e94aa3..ba0856f3 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -1,34 +1,8 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DPAA2_ETHDEV_H
@@ -76,6 +50,33 @@
/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF 0x04
+/* LX2 FRC Parsed values (Little Endian) */
+#define DPAA2_PKT_TYPE_ETHER 0x0060
+#define DPAA2_PKT_TYPE_IPV4 0x0000
+#define DPAA2_PKT_TYPE_IPV6 0x0020
+#define DPAA2_PKT_TYPE_IPV4_EXT \
+ (0x0001 | DPAA2_PKT_TYPE_IPV4)
+#define DPAA2_PKT_TYPE_IPV6_EXT \
+ (0x0001 | DPAA2_PKT_TYPE_IPV6)
+#define DPAA2_PKT_TYPE_IPV4_TCP \
+ (0x000e | DPAA2_PKT_TYPE_IPV4)
+#define DPAA2_PKT_TYPE_IPV6_TCP \
+ (0x000e | DPAA2_PKT_TYPE_IPV6)
+#define DPAA2_PKT_TYPE_IPV4_UDP \
+ (0x0010 | DPAA2_PKT_TYPE_IPV4)
+#define DPAA2_PKT_TYPE_IPV6_UDP \
+ (0x0010 | DPAA2_PKT_TYPE_IPV6)
+#define DPAA2_PKT_TYPE_IPV4_SCTP \
+ (0x000f | DPAA2_PKT_TYPE_IPV4)
+#define DPAA2_PKT_TYPE_IPV6_SCTP \
+ (0x000f | DPAA2_PKT_TYPE_IPV6)
+#define DPAA2_PKT_TYPE_IPV4_ICMP \
+ (0x0003 | DPAA2_PKT_TYPE_IPV4_EXT)
+#define DPAA2_PKT_TYPE_IPV6_ICMP \
+ (0x0003 | DPAA2_PKT_TYPE_IPV6_EXT)
+#define DPAA2_PKT_TYPE_VLAN_1 0x0160
+#define DPAA2_PKT_TYPE_VLAN_2 0x0260
+
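
These values compose by OR-ing an L4 code into the L3 base, so a single switch on the FRC value can classify a frame. Two worked cases:

    /* DPAA2_PKT_TYPE_IPV4_UDP = 0x0010 | DPAA2_PKT_TYPE_IPV4 (0x0000) = 0x0010
     * DPAA2_PKT_TYPE_IPV6_TCP = 0x000e | DPAA2_PKT_TYPE_IPV6 (0x0020) = 0x002e
     */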
struct dpaa2_dev_priv {
void *hw;
int32_t hw_id;
@@ -117,6 +118,11 @@ void dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
const struct qbman_result *dq,
struct dpaa2_queue *rxq,
struct rte_event *ev);
+void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev);
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
#endif /* _DPAA2_ETHDEV_H */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 8ecd238d..183293c1 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1,64 +1,116 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP.
+ * Copyright 2016 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Freescale Semiconductor, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <time.h>
#include <net/if.h>
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
+#include <rte_fslmc.h>
#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
+#include <dpaa2_eventdev.h>
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
+#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) do { \
+ DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
+ DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
+ DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
+ DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
+ DPAA2_SET_FD_ASAL(_fd, DPAA2_ASAL_VAL); \
+} while (0)
+
+static inline void __attribute__((hot))
+dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
+{
+ PMD_RX_LOG(DEBUG, "frc = 0x%x ", frc);
+
+ m->packet_type = RTE_PTYPE_UNKNOWN;
+ switch (frc) {
+ case DPAA2_PKT_TYPE_ETHER:
+ m->packet_type = RTE_PTYPE_L2_ETHER;
+ break;
+ case DPAA2_PKT_TYPE_IPV4:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4;
+ break;
+ case DPAA2_PKT_TYPE_IPV6:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6;
+ break;
+ case DPAA2_PKT_TYPE_IPV4_EXT:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT;
+ break;
+ case DPAA2_PKT_TYPE_IPV6_EXT:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT;
+ break;
+ case DPAA2_PKT_TYPE_IPV4_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA2_PKT_TYPE_IPV6_TCP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
+ break;
+ case DPAA2_PKT_TYPE_IPV4_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA2_PKT_TYPE_IPV6_UDP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
+ break;
+ case DPAA2_PKT_TYPE_IPV4_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
+ break;
+ case DPAA2_PKT_TYPE_IPV6_SCTP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
+ break;
+ case DPAA2_PKT_TYPE_IPV4_ICMP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
+ break;
+ case DPAA2_PKT_TYPE_IPV6_ICMP:
+ m->packet_type = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
+ break;
+ case DPAA2_PKT_TYPE_VLAN_1:
+ case DPAA2_PKT_TYPE_VLAN_2:
+ m->ol_flags |= PKT_RX_VLAN;
+ break;
+ /* More switch cases can be added */
+ /* TODO: Add handling for checksum error check from FRC */
+ default:
+ m->packet_type = RTE_PTYPE_UNKNOWN;
+ }
+}
+
static inline uint32_t __attribute__((hot))
-dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
+dpaa2_dev_rx_parse_slow(uint64_t hw_annot_addr)
{
uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
struct dpaa2_annot_hdr *annotation =
(struct dpaa2_annot_hdr *)hw_annot_addr;
PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);
-
if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
pkt_type = RTE_PTYPE_L2_ETHER_ARP;
goto parse_done;
@@ -114,21 +166,48 @@ parse_done:
return pkt_type;
}
-static inline void __attribute__((hot))
-dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
+static inline uint32_t __attribute__((hot))
+dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr)
{
struct dpaa2_annot_hdr *annotation =
- (struct dpaa2_annot_hdr *)hw_annot_addr;
+ (struct dpaa2_annot_hdr *)hw_annot_addr;
+
+ PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);
+ /* Check offloads first */
if (BIT_ISSET_AT_POS(annotation->word3,
L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
mbuf->ol_flags |= PKT_RX_VLAN;
if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
- if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
+ else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+
+ /* Return some common types from parse processing */
+ switch (annotation->word4) {
+ case DPAA2_L3_IPv4:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
+ case DPAA2_L3_IPv6:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
+ case DPAA2_L3_IPv4_TCP:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_TCP;
+ case DPAA2_L3_IPv4_UDP:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_L4_UDP;
+ case DPAA2_L3_IPv6_TCP:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_TCP;
+ case DPAA2_L3_IPv6_UDP:
+ return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
+ RTE_PTYPE_L4_UDP;
+ default:
+ PMD_RX_LOG(DEBUG, "Slow parse the parsing results\n");
+ break;
+ }
+
+ return dpaa2_dev_rx_parse_slow(hw_annot_addr);
}
static inline struct rte_mbuf *__attribute__((hot))
@@ -159,13 +238,14 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
first_seg->nb_segs = 1;
first_seg->next = NULL;
-
- first_seg->packet_type = dpaa2_dev_rx_parse(
+ if (dpaa2_svr_family == SVR_LX2160A)
+ dpaa2_dev_rx_parse_frc(first_seg,
+ DPAA2_GET_FD_FRC_PARSE_SUM(fd));
+ else
+ first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ DPAA2_FD_PTA_SIZE);
- dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
- DPAA2_GET_FD_ADDR(fd)) +
- DPAA2_FD_PTA_SIZE, first_seg);
+
rte_mbuf_refcnt_set(first_seg, 1);
cur_seg = first_seg;
while (!DPAA2_SG_IS_FINAL(sge)) {
@@ -206,20 +286,22 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
mbuf->data_len = DPAA2_GET_FD_LEN(fd);
mbuf->pkt_len = mbuf->data_len;
+ mbuf->next = NULL;
+ rte_mbuf_refcnt_set(mbuf, 1);
/* Parse the packet */
- /* parse results are after the private - sw annotation area */
- mbuf->packet_type = dpaa2_dev_rx_parse(
+ /* Parse results for LX2 are in the FRC field of the FD.
+ * For other DPAA2 platforms, parse results are after
+ * the private sw annotation area.
+ */
+
+ if (dpaa2_svr_family == SVR_LX2160A)
+ dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
+ else
+ mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ DPAA2_FD_PTA_SIZE);
- dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
- DPAA2_GET_FD_ADDR(fd)) +
- DPAA2_FD_PTA_SIZE, mbuf);
-
- mbuf->next = NULL;
- rte_mbuf_refcnt_set(mbuf, 1);
-
PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
"fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
mbuf, mbuf->buf_addr, mbuf->data_off,
@@ -238,9 +320,11 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
struct qbman_sge *sgt, *sge = NULL;
int i;
- /* First Prepare FD to be transmited*/
- /* Resetting the buffer pool id and offset field*/
- fd->simple.bpid_offset = 0;
+ if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
+ int ret = rte_vlan_insert(&mbuf);
+ if (ret)
+ return ret;
+ }
temp = rte_pktmbuf_alloc(mbuf->pool);
if (temp == NULL) {
@@ -250,8 +334,8 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
+ DPAA2_SET_ONLY_FD_BPID(fd, bpid);
DPAA2_SET_FD_OFFSET(fd, temp->data_off);
- DPAA2_SET_FD_BPID(fd, bpid);
DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
/*Set Scatter gather table and Scatter gather entries*/
@@ -308,14 +392,14 @@ static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd, uint16_t bpid)
{
- /*Resetting the buffer pool id and offset field*/
- fd->simple.bpid_offset = 0;
+ if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
+ if (rte_vlan_insert(&mbuf)) {
+ rte_pktmbuf_free(mbuf);
+ return;
+ }
+ }
- DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
- DPAA2_SET_FD_LEN(fd, mbuf->data_len);
- DPAA2_SET_FD_BPID(fd, bpid);
- DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
- DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
+ DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
"fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
@@ -347,6 +431,12 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
struct rte_mbuf *m;
void *mb = NULL;
+ if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
+ int ret = rte_vlan_insert(&mbuf);
+ if (ret)
+ return ret;
+ }
+
if (rte_dpaa2_mbuf_alloc_bulk(
rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
@@ -363,14 +453,7 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
m->packet_type = mbuf->packet_type;
m->tx_offload = mbuf->tx_offload;
- /*Resetting the buffer pool id and offset field*/
- fd->simple.bpid_offset = 0;
-
- DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
- DPAA2_SET_FD_LEN(fd, mbuf->data_len);
- DPAA2_SET_FD_BPID(fd, bpid);
- DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
- DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
+ DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
(void *)mbuf, mbuf->buf_addr);
@@ -390,12 +473,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
/* Function receive frames for a given device and VQ*/
struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
- struct qbman_result *dq_storage;
+ struct qbman_result *dq_storage, *dq_storage1 = NULL;
uint32_t fqid = dpaa2_q->fqid;
int ret, num_rx = 0;
- uint8_t is_last = 0, status;
+ uint8_t pending, status;
struct qbman_swp *swp;
- const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE];
+ const struct qbman_fd *fd, *next_fd;
struct qbman_pull_desc pulldesc;
struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
struct rte_eth_dev *dev = dpaa2_q->dev;
@@ -408,13 +491,14 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
}
swp = DPAA2_PER_LCORE_PORTAL;
- if (!q_storage->active_dqs) {
+ if (unlikely(!q_storage->active_dqs)) {
q_storage->toggle = 0;
dq_storage = q_storage->dq_storage[q_storage->toggle];
+ q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_pkts;
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_numframes(&pulldesc,
- (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
- DPAA2_DQRR_RING_SIZE : nb_pkts);
+ q_storage->last_num_pkts);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
@@ -437,7 +521,22 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
}
+
dq_storage = q_storage->active_dqs;
+ rte_prefetch0((void *)((uint64_t)(dq_storage)));
+ rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
+
+ /* Prepare the next pull descriptor. This gives room for the
+ * prefetching done on DQRR entries.
+ */
+ q_storage->toggle ^= 1;
+ dq_storage1 = q_storage->dq_storage[q_storage->toggle];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+
/* Check if the previous issued command is completed.
* Also seems like the SWP is shared between the Ethernet Driver
* and the SEC driver.
@@ -446,33 +545,37 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
;
if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
clear_swp_active_dqs(q_storage->active_dpio_id);
- while (!is_last) {
+
+ pending = 1;
+
+ do {
/* Loop until the dq_storage is updated with
* new token by QBMAN
*/
while (!qbman_check_new_result(dq_storage))
;
- rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
+ rte_prefetch0((void *)((uint64_t)(dq_storage + 2)));
/* Check whether Last Pull command is Expired and
* setting Condition for Loop termination
*/
if (qbman_result_DQ_is_pull_complete(dq_storage)) {
- is_last = 1;
+ pending = 0;
/* Check for valid frame. */
- status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ status = qbman_result_DQ_flags(dq_storage);
if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
continue;
}
- fd[num_rx] = qbman_result_DQ_fd(dq_storage);
+ fd = qbman_result_DQ_fd(dq_storage);
+ next_fd = qbman_result_DQ_fd(dq_storage + 1);
/* Prefetch Annotation address for the parse results */
- rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx])
+ rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(next_fd)
+ DPAA2_FD_PTA_SIZE + 16));
- if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
- bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
+ if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
+ bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
else
- bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
+ bufs[num_rx] = eth_fd_to_mbuf(fd);
bufs[num_rx]->port = dev->data->port_id;
if (dev->data->dev_conf.rxmode.hw_vlan_strip)
@@ -480,7 +583,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
dq_storage++;
num_rx++;
- }
+ } while (pending);
if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
while (!qbman_check_command_complete(
@@ -488,14 +591,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
;
clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
}
- q_storage->toggle ^= 1;
- dq_storage = q_storage->dq_storage[q_storage->toggle];
- qbman_pull_desc_clear(&pulldesc);
- qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
- qbman_pull_desc_set_fq(&pulldesc, fqid);
- qbman_pull_desc_set_storage(&pulldesc, dq_storage,
- (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
- /* Issue a volatile dequeue command. */
+ /* Issue a volatile dequeue command for the next pull */
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
PMD_RX_LOG(WARNING, "VDQ command is not issued."
@@ -504,13 +600,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
break;
}
- q_storage->active_dqs = dq_storage;
+ q_storage->active_dqs = dq_storage1;
q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
- set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
+ set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
dpaa2_q->rx_pkts += num_rx;
- /* Return the total number of packets received to DPAA2 app */
return num_rx;
}
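
The restructured receive path above is a double-buffering (ping-pong) scheme: while hardware fills one dequeue storage, software prepares, and then issues, the pull for the other. Reduced to its core, with prepare_pull()/issue_pull()/drain() as stand-ins for the qbman_pull_desc_*, qbman_swp_pull and result-walk calls:

    /* Stand-ins for the real qbman calls, for illustration only. */
    extern void prepare_pull(void *storage);
    extern void issue_pull(void *storage);
    extern void drain(void *storage);

    static void rx_loop(void *storage[2])
    {
            int t = 0;

            issue_pull(storage[t]);           /* prime the first pull */
            for (;;) {
                    int cur = t;

                    t ^= 1;
                    prepare_pull(storage[t]); /* next buffer, set up early */
                    drain(storage[cur]);      /* consume finished results */
                    issue_pull(storage[t]);   /* overlap HW fill with SW work */
            }
    }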
@@ -521,7 +616,8 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
struct dpaa2_queue *rxq,
struct rte_event *ev)
{
- ev->mbuf = eth_fd_to_mbuf(fd);
+ rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+ DPAA2_FD_PTA_SIZE + 16));
ev->flow_id = rxq->ev.flow_id;
ev->sub_event_type = rxq->ev.sub_event_type;
@@ -531,9 +627,40 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
ev->queue_id = rxq->ev.queue_id;
ev->priority = rxq->ev.priority;
+ ev->mbuf = eth_fd_to_mbuf(fd);
+
qbman_swp_dqrr_consume(swp, dq);
}
+void __attribute__((hot))
+dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ uint8_t dqrr_index;
+
+ rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+ DPAA2_FD_PTA_SIZE + 16));
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+
+ ev->mbuf = eth_fd_to_mbuf(fd);
+
+ dqrr_index = qbman_get_dqrr_idx(dq);
+ ev->mbuf->seqn = dqrr_index + 1;
+ DPAA2_PER_LCORE_DQRR_SIZE++;
+ DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+ DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
+}
+
/*
* Callback to handle sending packets through WRIOP based interface
*/
@@ -554,6 +681,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint16_t bpid;
struct rte_eth_dev *dev = dpaa2_q->dev;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ uint32_t flags[MAX_TX_RING_SLOTS] = {0};
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
@@ -572,7 +700,6 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
qbman_eq_desc_set_response(&eqdesc, 0, 0);
qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
dpaa2_q->flow_id, dpaa2_q->tc_index);
-
/*Clear the unused FD fields before sending*/
while (nb_pkts) {
/*Check if the queue is congested*/
@@ -587,11 +714,39 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
for (loop = 0; loop < frames_to_send; loop++) {
+ if ((*bufs)->seqn) {
+ uint8_t dqrr_index = (*bufs)->seqn - 1;
+
+ flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
+ dqrr_index;
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+ (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
+ }
+
fd_arr[loop].simple.frc = 0;
DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
- if (RTE_MBUF_DIRECT(*bufs)) {
+ if (likely(RTE_MBUF_DIRECT(*bufs))) {
mp = (*bufs)->pool;
+ /* Check for the basic scenario and set
+ * the FD fields directly here.
+ */
+ if (likely(mp && mp->ops_index ==
+ priv->bp_list->dpaa2_ops_index &&
+ (*bufs)->nb_segs == 1 &&
+ rte_mbuf_refcnt_read((*bufs)) == 1)) {
+ if (unlikely((*bufs)->ol_flags
+ & PKT_TX_VLAN_PKT)) {
+ ret = rte_vlan_insert(bufs);
+ if (ret)
+ goto send_n_return;
+ }
+ DPAA2_MBUF_TO_CONTIG_FD((*bufs),
+ &fd_arr[loop], mempool_to_bpid(mp));
+ bufs++;
+ continue;
+ }
} else {
mi = rte_mbuf_from_indirect(*bufs);
mp = mi->pool;
@@ -636,13 +791,14 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
loop = 0;
while (loop < frames_to_send) {
loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
- &fd_arr[loop], frames_to_send - loop);
+ &fd_arr[loop], &flags[loop],
+ frames_to_send - loop);
}
num_tx += frames_to_send;
- dpaa2_q->tx_pkts += frames_to_send;
nb_pkts -= frames_to_send;
}
+ dpaa2_q->tx_pkts += num_tx;
return num_tx;
send_n_return:
@@ -652,12 +808,14 @@ send_n_return:
while (i < loop) {
i += qbman_swp_enqueue_multiple(swp, &eqdesc,
- &fd_arr[i], loop - i);
+ &fd_arr[i],
+ &flags[loop],
+ loop - i);
}
num_tx += loop;
- dpaa2_q->tx_pkts += loop;
}
skip_tx:
+ dpaa2_q->tx_pkts += num_tx;
return num_tx;
}
diff --git a/drivers/net/dpaa2/mc/dpkg.c b/drivers/net/dpaa2/mc/dpkg.c
index 3f98907f..80f94f40 100644
--- a/drivers/net/dpaa2/mc/dpkg.c
+++ b/drivers/net/dpaa2/mc/dpkg.c
@@ -1,40 +1,7 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2017 NXP
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 6f671fe0..69cf119c 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2016 NXP
+ *
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h
index 7f46bafb..4de70f30 100644
--- a/drivers/net/dpaa2/mc/fsl_dpkg.h
+++ b/drivers/net/dpaa2/mc/fsl_dpkg.h
@@ -1,41 +1,7 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
- *
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
* Copyright 2013-2015 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPKG_H_
#define __FSL_DPKG_H_
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index 5227ea15..f0edcd27 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_DPNI_H
#define __FSL_DPNI_H
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index 1a483329..eb3e9987 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -1,41 +1,8 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
+ * Copyright 2016-2017 NXP
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _FSL_DPNI_CMD_H
#define _FSL_DPNI_CMD_H
diff --git a/drivers/net/dpaa2/mc/fsl_net.h b/drivers/net/dpaa2/mc/fsl_net.h
index dbec306a..964870ba 100644
--- a/drivers/net/dpaa2/mc/fsl_net.h
+++ b/drivers/net/dpaa2/mc/fsl_net.h
@@ -1,40 +1,7 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * BSD LICENSE
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
* Copyright 2013-2015 Freescale Semiconductor Inc.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * GPL LICENSE SUMMARY
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FSL_NET_H
#define __FSL_NET_H
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index 3f0344b4..ba81a1f4 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2015 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/e1000/base/e1000_82575.c b/drivers/net/e1000/base/e1000_82575.c
index c6400bde..15c7dd84 100644
--- a/drivers/net/e1000/base/e1000_82575.c
+++ b/drivers/net/e1000/base/e1000_82575.c
@@ -2414,7 +2414,7 @@ out:
* e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
* @hw: pointer to the HW structure
*
- * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
* the values found in the EEPROM. This addresses an issue in which these
* bits are not restored from EEPROM after reset.
**/
diff --git a/drivers/net/e1000/base/e1000_ich8lan.c b/drivers/net/e1000/base/e1000_ich8lan.c
index 6dd046d2..92ab6fc6 100644
--- a/drivers/net/e1000/base/e1000_ich8lan.c
+++ b/drivers/net/e1000/base/e1000_ich8lan.c
@@ -4888,7 +4888,7 @@ STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
* @hw: pointer to the HW structure
*
* ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
- * register, so the the bus width is hard coded.
+ * register, so the bus width is hard coded.
**/
STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
{
diff --git a/drivers/net/e1000/base/meson.build b/drivers/net/e1000/base/meson.build
new file mode 100644
index 00000000..5e1716de
--- /dev/null
+++ b/drivers/net/e1000/base/meson.build
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = [
+ 'e1000_80003es2lan.c',
+ 'e1000_82540.c',
+ 'e1000_82541.c',
+ 'e1000_82542.c',
+ 'e1000_82543.c',
+ 'e1000_82571.c',
+ 'e1000_82575.c',
+ 'e1000_api.c',
+ 'e1000_i210.c',
+ 'e1000_ich8lan.c',
+ 'e1000_mac.c',
+ 'e1000_manage.c',
+ 'e1000_mbx.c',
+ 'e1000_nvm.c',
+ 'e1000_osdep.c',
+ 'e1000_phy.c',
+ 'e1000_vf.c'
+]
+
+error_cflags = ['-Wno-uninitialized', '-Wno-unused-parameter',
+ '-Wno-unused-variable', '-Wno-misleading-indentation',
+ '-Wno-implicit-fallthrough']
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('e1000_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 5668910c..23b089c8 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef _E1000_ETHDEV_H_
@@ -257,6 +228,12 @@ struct igb_ethertype_filter {
uint32_t etqf;
};
+struct igb_rte_flow_rss_conf {
+ struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+ uint16_t num; /**< Number of entries in queue[]. */
+ uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queue indices to use. */
+};
+
/*
* Structure to store filters' info.
*/
@@ -274,6 +251,8 @@ struct e1000_filter_info {
struct e1000_2tuple_filter_list twotuple_list;
/* store the SYN filter info */
uint32_t syn_info;
+ /* store the rss filter info */
+ struct igb_rte_flow_rss_conf rss_info;
};
/*
@@ -342,6 +321,12 @@ struct igb_flex_filter_ele {
struct rte_eth_flex_filter filter_info;
};
+/* rss filter list structure */
+struct igb_rss_conf_ele {
+ TAILQ_ENTRY(igb_rss_conf_ele) entries;
+ struct igb_rte_flow_rss_conf filter_info;
+};
+
/* igb_flow memory list structure */
struct igb_flow_mem {
TAILQ_ENTRY(igb_flow_mem) entries;
@@ -357,6 +342,8 @@ TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
struct igb_syn_filter_list igb_filter_syn_list;
TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
struct igb_flex_filter_list igb_filter_flex_list;
+TAILQ_HEAD(igb_rss_filter_list, igb_rss_conf_ele);
+struct igb_rss_filter_list igb_filter_rss_list;
TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
struct igb_flow_mem_list igb_flow_list;
@@ -500,4 +487,8 @@ int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter,
bool add);
+int igb_config_rss_filter(struct rte_eth_dev *dev,
+ struct igb_rte_flow_rss_conf *conf,
+ bool add);
+
#endif /* _E1000_ETHDEV_H_ */
diff --git a/drivers/net/e1000/e1000_logs.h b/drivers/net/e1000/e1000_logs.h
index 81e7bf52..50348e9e 100644
--- a/drivers/net/e1000/e1000_logs.h
+++ b/drivers/net/e1000/e1000_logs.h
@@ -1,47 +1,16 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _E1000_LOGS_H_
#define _E1000_LOGS_H_
+extern int e1000_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+ rte_log(RTE_LOG_ ## level, e1000_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
-#ifdef RTE_LIBRTE_E1000_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
#ifdef RTE_LIBRTE_E1000_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -64,12 +33,10 @@
#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
#endif
-#ifdef RTE_LIBRTE_E1000_DEBUG_DRIVER
+extern int e1000_logtype_driver;
#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, e1000_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index a0c3b4dc..242375ff 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <sys/queue.h>
@@ -45,7 +16,7 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
@@ -134,6 +105,9 @@ static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
+int e1000_logtype_init;
+int e1000_logtype_driver;
+
/*
* The set of PCI devices this driver supports
*/
@@ -161,6 +135,7 @@ static const struct rte_pci_id pci_id_em_map[] = {
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH2_LV_LM) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) },
@@ -286,6 +261,7 @@ eth_em_dev_is_ich8(struct e1000_hw *hw)
DEBUGFUNC("eth_em_dev_is_ich8");
switch (hw->device_id) {
+ case E1000_DEV_ID_PCH2_LV_LM:
case E1000_DEV_ID_PCH_LPT_I217_LM:
case E1000_DEV_ID_PCH_LPT_I217_V:
case E1000_DEV_ID_PCH_LPTLP_I218_LM:
@@ -586,6 +562,30 @@ em_set_pba(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_PBA, pba);
}
+static void
+eth_em_rxtx_control(struct rte_eth_dev *dev,
+ bool enable)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tctl, rctl;
+
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ if (enable) {
+ /* enable Tx/Rx */
+ tctl |= E1000_TCTL_EN;
+ rctl |= E1000_RCTL_EN;
+ } else {
+ /* disable Tx/Rx */
+ tctl &= ~E1000_TCTL_EN;
+ rctl &= ~E1000_RCTL_EN;
+ }
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
static int
eth_em_start(struct rte_eth_dev *dev)
{
@@ -759,6 +759,9 @@ eth_em_start(struct rte_eth_dev *dev)
adapter->stopped = 0;
+ eth_em_rxtx_control(dev, true);
+ eth_em_link_update(dev, 0);
+
PMD_INIT_LOG(DEBUG, "<<");
return 0;
@@ -784,6 +787,7 @@ eth_em_stop(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ eth_em_rxtx_control(dev, false);
em_rxq_intr_disable(hw);
em_lsc_intr_disable(hw);
@@ -1209,7 +1213,7 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
link.link_speed = 0;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_status = ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ link.link_autoneg = ETH_LINK_FIXED;
}
rte_em_dev_atomic_write_link_status(dev, &link);
@@ -1610,7 +1614,6 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- uint32_t tctl, rctl;
struct rte_eth_link link;
int ret;
@@ -1642,21 +1645,6 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function);
- tctl = E1000_READ_REG(hw, E1000_TCTL);
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- if (link.link_status) {
- /* enable Tx/Rx */
- tctl |= E1000_TCTL_EN;
- rctl |= E1000_RCTL_EN;
- } else {
- /* disable Tx/Rx */
- tctl &= ~E1000_TCTL_EN;
- rctl &= ~E1000_RCTL_EN;
- }
- E1000_WRITE_REG(hw, E1000_TCTL, tctl);
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- E1000_WRITE_FLUSH(hw);
-
return 0;
}
@@ -1678,7 +1666,7 @@ eth_em_interrupt_handler(void *param)
eth_em_interrupt_get_status(dev);
eth_em_interrupt_action(dev, dev->intr_handle);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int
@@ -1875,3 +1863,15 @@ eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(e1000_init_log);
+static void
+e1000_init_log(void)
+{
+ e1000_logtype_init = rte_log_register("pmd.net.e1000.init");
+ if (e1000_logtype_init >= 0)
+ rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE);
+ e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver");
+ if (e1000_logtype_driver >= 0)
+ rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE);
+}
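
With the compile-time RTE_LIBRTE_E1000_DEBUG_INIT/DRIVER switches gone, the
init and driver logs are ordinary dynamic log types registered at NOTICE
level, so their verbosity can be changed at run time. A small sketch, relying
only on the two logtype variables declared above:

    #include <rte_log.h>

    extern int e1000_logtype_init;
    extern int e1000_logtype_driver;

    /* Raise both e1000 log streams to DEBUG without rebuilding the PMD. */
    static void
    e1000_logs_to_debug(void)
    {
        rte_log_set_level(e1000_logtype_init, RTE_LOG_DEBUG);
        rte_log_set_level(e1000_logtype_driver, RTE_LOG_DEBUG);
    }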
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 1d8f0794..02fae100 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <sys/queue.h>
@@ -60,7 +31,7 @@
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_ip.h>
#include <rte_udp.h>
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index fdc139f3..3c5138de 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <sys/queue.h>
@@ -45,7 +16,7 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
@@ -948,6 +919,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
TAILQ_INIT(&igb_filter_ethertype_list);
TAILQ_INIT(&igb_filter_syn_list);
TAILQ_INIT(&igb_filter_flex_list);
+ TAILQ_INIT(&igb_filter_rss_list);
TAILQ_INIT(&igb_flow_list);
return 0;
@@ -1007,6 +979,10 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
memset(filter_info->ethertype_filters, 0,
E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));
+ /* clear the rss filter info */
+ memset(&filter_info->rss_info, 0,
+ sizeof(struct igb_rte_flow_rss_conf));
+
/* remove all ntuple filters of the device */
igb_ntuple_filter_uninit(eth_dev);
@@ -1212,7 +1188,7 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
uint16_t nb_rx_q = dev->data->nb_rx_queues;
- uint16_t nb_tx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
tx_mq_mode == ETH_MQ_TX_DCB ||
@@ -1301,6 +1277,31 @@ eth_igb_configure(struct rte_eth_dev *dev)
return 0;
}
+static void
+eth_igb_rxtx_control(struct rte_eth_dev *dev,
+ bool enable)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tctl, rctl;
+
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+ if (enable) {
+ /* enable Tx/Rx */
+ tctl |= E1000_TCTL_EN;
+ rctl |= E1000_RCTL_EN;
+ } else {
+ /* disable Tx/Rx */
+ tctl &= ~E1000_TCTL_EN;
+ rctl &= ~E1000_RCTL_EN;
+ }
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
static int
eth_igb_start(struct rte_eth_dev *dev)
{
@@ -1504,6 +1505,9 @@ eth_igb_start(struct rte_eth_dev *dev)
/* restore all types filter */
igb_filter_restore(dev);
+ eth_igb_rxtx_control(dev, true);
+ eth_igb_link_update(dev, 0);
+
PMD_INIT_LOG(DEBUG, "<<");
return 0;
@@ -1529,6 +1533,8 @@ eth_igb_stop(struct rte_eth_dev *dev)
struct rte_eth_link link;
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ eth_igb_rxtx_control(dev, false);
+
igb_intr_disable(hw);
/* disable intr eventfd mapping */
@@ -2435,7 +2441,7 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
link.link_speed = 0;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_status = ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ link.link_autoneg = ETH_LINK_FIXED;
}
rte_igb_dev_atomic_write_link_status(dev, &link);
@@ -2859,7 +2865,6 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- uint32_t tctl, rctl;
struct rte_eth_link link;
int ret;
@@ -2901,22 +2906,8 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
pci_dev->addr.bus,
pci_dev->addr.devid,
pci_dev->addr.function);
- tctl = E1000_READ_REG(hw, E1000_TCTL);
- rctl = E1000_READ_REG(hw, E1000_RCTL);
- if (link.link_status) {
- /* enable Tx/Rx */
- tctl |= E1000_TCTL_EN;
- rctl |= E1000_RCTL_EN;
- } else {
- /* disable Tx/Rx */
- tctl &= ~E1000_TCTL_EN;
- rctl &= ~E1000_RCTL_EN;
- }
- E1000_WRITE_REG(hw, E1000_TCTL, tctl);
- E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- E1000_WRITE_FLUSH(hw);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
return 0;
@@ -2970,13 +2961,17 @@ void igbvf_mbx_process(struct rte_eth_dev *dev)
struct e1000_mbx_info *mbx = &hw->mbx;
u32 in_msg = 0;
- if (mbx->ops.read(hw, &in_msg, 1, 0))
- return;
+ /* peek the message first */
+ in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0));
/* PF reset VF event */
- if (in_msg == E1000_PF_CONTROL_MSG)
+ if (in_msg == E1000_PF_CONTROL_MSG) {
+ /* dummy mbx read to ack pf */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ return;
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
- NULL, NULL);
+ NULL);
+ }
}
static int
@@ -3300,7 +3295,8 @@ igbvf_dev_start(struct rte_eth_dev *dev)
}
/* check and configure queue intr-vector mapping */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
intr_vector = dev->data->nb_rx_queues;
ret = rte_intr_efd_enable(intr_handle, intr_vector);
if (ret)
@@ -5628,6 +5624,17 @@ igb_flex_filter_restore(struct rte_eth_dev *dev)
}
}
+/* restore rss filter */
+static inline void
+igb_rss_filter_restore(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->rss_info.num)
+ igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
+}
+
/* restore all types filter */
static int
igb_filter_restore(struct rte_eth_dev *dev)
@@ -5636,6 +5643,7 @@ igb_filter_restore(struct rte_eth_dev *dev)
igb_ethertype_filter_restore(dev);
igb_syn_filter_restore(dev);
igb_flex_filter_restore(dev);
+ igb_rss_filter_restore(dev);
return 0;
}
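
The igbvf_mbx_process() change above works around an ACK side effect:
mbx->ops.read() both fetches and acknowledges a mailbox message, so an
unconditional read could consume messages this handler does not act on.
Peeking the shared mailbox memory first and issuing the ACKing read only for
the PF reset notification avoids that. Condensed sketch, where notify_reset()
is a hypothetical stand-in for the _rte_eth_dev_callback_process() call:

    u32 msg = E1000_READ_REG(hw, E1000_VMBMEM(0)); /* peek: no ACK side effect */

    if (msg == E1000_PF_CONTROL_MSG) {
        /* the dummy read acknowledges the PF before acting on the event */
        if (mbx->ops.read(hw, &msg, 1, 0) == 0)
            notify_reset();
    }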
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index 22bad265..a1427596 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <sys/queue.h>
@@ -44,7 +15,7 @@
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
@@ -1295,6 +1266,101 @@ igb_parse_flex_filter(struct rte_eth_dev *dev,
return 0;
}
+static int
+igb_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct igb_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ uint16_t n, index;
+
+ /**
+ * RSS only supports forwarding;
+ * check that the first non-void action is RSS.
+ */
+ index = 0;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ if (!rss || !rss->num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+
+ if (rss->rss_conf)
+ rss_conf->rss_conf = *rss->rss_conf;
+ else
+ rss_conf->rss_conf.rss_hf = IGB_RSS_OFFLOAD_ALL;
+
+ for (n = 0; n < rss->num; ++n)
+ rss_conf->queue[n] = rss->queue[n];
+ rss_conf->num = rss->num;
+
+ /* check that the next non-void action is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
/**
* Create a flow rule.
* Theoretically one rule can match more than one filter.
@@ -1313,11 +1379,13 @@ igb_flow_create(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
struct rte_eth_flex_filter flex_filter;
+ struct igb_rte_flow_rss_conf rss_conf;
struct rte_flow *flow = NULL;
struct igb_ntuple_filter_ele *ntuple_filter_ptr;
struct igb_ethertype_filter_ele *ethertype_filter_ptr;
struct igb_eth_syn_filter_ele *syn_filter_ptr;
struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_rss_conf_ele *rss_filter_ptr;
struct igb_flow_mem *igb_flow_mem_ptr;
flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
@@ -1345,6 +1413,11 @@ igb_flow_create(struct rte_eth_dev *dev,
if (!ret) {
ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
sizeof(struct igb_ntuple_filter_ele), 0);
+ if (!ntuple_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+
rte_memcpy(&ntuple_filter_ptr->filter_info,
&ntuple_filter,
sizeof(struct rte_eth_ntuple_filter));
@@ -1367,6 +1440,11 @@ igb_flow_create(struct rte_eth_dev *dev,
ethertype_filter_ptr = rte_zmalloc(
"igb_ethertype_filter",
sizeof(struct igb_ethertype_filter_ele), 0);
+ if (!ethertype_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+
rte_memcpy(&ethertype_filter_ptr->filter_info,
&ethertype_filter,
sizeof(struct rte_eth_ethertype_filter));
@@ -1387,6 +1465,11 @@ igb_flow_create(struct rte_eth_dev *dev,
if (!ret) {
syn_filter_ptr = rte_zmalloc("igb_syn_filter",
sizeof(struct igb_eth_syn_filter_ele), 0);
+ if (!syn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+
rte_memcpy(&syn_filter_ptr->filter_info,
&syn_filter,
sizeof(struct rte_eth_syn_filter));
@@ -1408,6 +1491,11 @@ igb_flow_create(struct rte_eth_dev *dev,
if (!ret) {
flex_filter_ptr = rte_zmalloc("igb_flex_filter",
sizeof(struct igb_flex_filter_ele), 0);
+ if (!flex_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+
rte_memcpy(&flex_filter_ptr->filter_info,
&flex_filter,
sizeof(struct rte_eth_flex_filter));
@@ -1419,6 +1507,29 @@ igb_flow_create(struct rte_eth_dev *dev,
}
}
+ memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ ret = igb_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
+ if (!ret) {
+ ret = igb_config_rss_filter(dev, &rss_conf, TRUE);
+ if (!ret) {
+ rss_filter_ptr = rte_zmalloc("igb_rss_filter",
+ sizeof(struct igb_rss_conf_ele), 0);
+ if (!rss_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&rss_filter_ptr->filter_info,
+ &rss_conf,
+ sizeof(struct igb_rte_flow_rss_conf));
+ TAILQ_INSERT_TAIL(&igb_filter_rss_list,
+ rss_filter_ptr, entries);
+ flow->rule = rss_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_HASH;
+ return flow;
+ }
+ }
+
out:
TAILQ_REMOVE(&igb_flow_list,
igb_flow_mem_ptr, entries);
@@ -1446,6 +1557,7 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
struct rte_eth_flex_filter flex_filter;
+ struct igb_rte_flow_rss_conf rss_conf;
int ret;
memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -1469,6 +1581,12 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
ret = igb_parse_flex_filter(dev, attr, pattern,
actions, &flex_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ ret = igb_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
return ret;
}
@@ -1487,6 +1605,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
struct igb_eth_syn_filter_ele *syn_filter_ptr;
struct igb_flex_filter_ele *flex_filter_ptr;
struct igb_flow_mem *igb_flow_mem_ptr;
+ struct igb_rss_conf_ele *rss_filter_ptr;
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
@@ -1533,6 +1652,17 @@ igb_flow_destroy(struct rte_eth_dev *dev,
rte_free(flex_filter_ptr);
}
break;
+ case RTE_ETH_FILTER_HASH:
+ rss_filter_ptr = (struct igb_rss_conf_ele *)
+ pmd_flow->rule;
+ ret = igb_config_rss_filter(dev,
+ &rss_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&igb_filter_rss_list,
+ rss_filter_ptr, entries);
+ rte_free(rss_filter_ptr);
+ }
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1621,6 +1751,17 @@ igb_clear_all_flex_filter(struct rte_eth_dev *dev)
igb_remove_flex_filter(dev, flex_filter);
}
+/* remove the rss filter */
+static void
+igb_clear_rss_filter(struct rte_eth_dev *dev)
+{
+ struct e1000_filter_info *filter =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter->rss_info.num)
+ igb_config_rss_filter(dev, &filter->rss_info, FALSE);
+}
+
void
igb_filterlist_flush(struct rte_eth_dev *dev)
{
@@ -1628,6 +1769,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
struct igb_ethertype_filter_ele *ethertype_filter_ptr;
struct igb_eth_syn_filter_ele *syn_filter_ptr;
struct igb_flex_filter_ele *flex_filter_ptr;
+ struct igb_rss_conf_ele *rss_filter_ptr;
struct igb_flow_mem *igb_flow_mem_ptr;
enum rte_filter_type filter_type;
struct rte_flow *pmd_flow;
@@ -1670,6 +1812,14 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
flex_filter_ptr, entries);
rte_free(flex_filter_ptr);
break;
+ case RTE_ETH_FILTER_HASH:
+ rss_filter_ptr =
+ (struct igb_rss_conf_ele *)
+ pmd_flow->rule;
+ TAILQ_REMOVE(&igb_filter_rss_list,
+ rss_filter_ptr, entries);
+ rte_free(rss_filter_ptr);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type"
"(%d) not supported", filter_type);
@@ -1693,6 +1843,7 @@ igb_flow_flush(struct rte_eth_dev *dev,
igb_clear_all_ethertype_filter(dev);
igb_clear_syn_filter(dev);
igb_clear_all_flex_filter(dev);
+ igb_clear_rss_filter(dev);
igb_filterlist_flush(dev);
return 0;
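
For reference, applications reach igb_parse_rss_filter() through the generic
rte_flow API. With the 18.02 rte_flow_action_rss layout (rss_conf pointer,
num, flexible queue[] tail), a rule spreading all ingress traffic over four
queues could be built roughly as below — a sketch only, with error reporting
left to the caller:

    #include <stdlib.h>
    #include <rte_flow.h>

    /* Hypothetical helper: install the one RSS flow rule igb supports. */
    static struct rte_flow *
    igb_rss_flow_example(uint16_t port_id, struct rte_flow_error *err)
    {
        static const struct rte_flow_attr attr = { .ingress = 1 };
        static const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[2];
        struct rte_flow_action_rss *rss;
        struct rte_flow *flow;
        uint16_t i;

        /* rte_flow_action_rss ends in a flexible queue[] array */
        rss = calloc(1, sizeof(*rss) + 4 * sizeof(uint16_t));
        if (rss == NULL)
            return NULL;
        rss->rss_conf = NULL;  /* NULL: driver defaults to IGB_RSS_OFFLOAD_ALL */
        rss->num = 4;
        for (i = 0; i < 4; i++)
            rss->queue[i] = i;

        actions[0].type = RTE_FLOW_ACTION_TYPE_RSS;
        actions[0].conf = rss;
        actions[1].type = RTE_FLOW_ACTION_TYPE_END;
        actions[1].conf = NULL;

        /* the PMD copies the configuration, so rss can be freed afterwards */
        flow = rte_flow_create(port_id, &attr, pattern, actions, err);
        free(rss);
        return flow;
    }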
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index cd6ae2fb..b9f2e539 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <stdio.h>
@@ -45,7 +16,7 @@
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>
diff --git a/drivers/net/e1000/igb_regs.h b/drivers/net/e1000/igb_regs.h
index 0b5e5e58..cacd49c7 100644
--- a/drivers/net/e1000/igb_regs.h
+++ b/drivers/net/e1000/igb_regs.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Intel Corporation
*/
#ifndef _IGB_REGS_H_
#define _IGB_REGS_H_
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 4ee12e9e..2f371672 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <sys/queue.h>
@@ -60,7 +31,7 @@
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
@@ -2786,3 +2757,64 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_thresh.hthresh = txq->hthresh;
qinfo->conf.tx_thresh.wthresh = txq->wthresh;
}
+
+int
+igb_config_rss_filter(struct rte_eth_dev *dev,
+ struct igb_rte_flow_rss_conf *conf, bool add)
+{
+ uint32_t shift;
+ uint16_t i, j;
+ struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct e1000_filter_info *filter_info =
+ E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!add) {
+ if (memcmp(conf, &filter_info->rss_info,
+ sizeof(struct igb_rte_flow_rss_conf)) == 0) {
+ igb_rss_disable(dev);
+ memset(&filter_info->rss_info, 0,
+ sizeof(struct igb_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (filter_info->rss_info.num)
+ return -EINVAL;
+
+ /* Fill in redirection table. */
+ shift = (hw->mac.type == e1000_82575) ? 6 : 0;
+ for (i = 0, j = 0; i < 128; i++, j++) {
+ union e1000_reta {
+ uint32_t dword;
+ uint8_t bytes[4];
+ } reta;
+ uint8_t q_idx;
+
+ if (j == conf->num)
+ j = 0;
+ q_idx = conf->queue[j];
+ reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
+ if ((i & 3) == 3)
+ E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
+ }
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
+ igb_rss_disable(dev);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ igb_hw_rss_hash_set(hw, &rss_conf);
+
+ rte_memcpy(&filter_info->rss_info,
+ conf, sizeof(struct igb_rte_flow_rss_conf));
+
+ return 0;
+}
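
The RETA loop above packs four 8-bit queue indices into each 32-bit redirection-table register: the byte union fills one lane per iteration and flushes a full dword every fourth entry, wrapping through the caller's queue list. A minimal standalone sketch of the same packing, assuming a hypothetical write_reg() callback in place of E1000_WRITE_REG and the little-endian byte layout the union-based code relies on:

    #include <stdint.h>

    /* Hypothetical sketch: fill 32 RETA dwords from a queue list,
     * wrapping through the list exactly as the loop above does. */
    static void fill_reta(const uint8_t *queues, uint16_t num, uint32_t shift,
                          void (*write_reg)(uint32_t idx, uint32_t dword))
    {
            uint32_t dword = 0;
            uint16_t i, j;

            for (i = 0, j = 0; i < 128; i++, j++) {
                    if (j == num)
                            j = 0; /* wrap through the queue list */
                    /* one byte lane per table entry, low byte first */
                    dword |= (uint32_t)(uint8_t)(queues[j] << shift)
                             << ((i & 3) * 8);
                    if ((i & 3) == 3) { /* dword full: flush it */
                            write_reg(i >> 2, dword);
                            dword = 0;
                    }
            }
    }
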
diff --git a/drivers/net/e1000/meson.build b/drivers/net/e1000/meson.build
new file mode 100644
index 00000000..3a1bf5af
--- /dev/null
+++ b/drivers/net/e1000/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'em_ethdev.c',
+ 'em_rxtx.c',
+ 'igb_ethdev.c',
+ 'igb_flow.c',
+ 'igb_pf.c',
+ 'igb_rxtx.c'
+)
+
+includes += include_directories('base')
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index accecf51..8cba319e 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -96,7 +96,7 @@ typedef uint64_t dma_addr_t;
#define ENA_GET_SYSTEM_USECS() \
(rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz())
-#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
#define ENA_ASSERT(cond, format, arg...) \
do { \
if (unlikely(!(cond))) { \
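
Switching the guard from RTE_LOG_LEVEL to RTE_LOG_DP_LEVEL ties ENA_ASSERT to the data-path log level, so the whole assert body is compiled out unless the build enables data-path debug logging. A sketch of the same compile-time gating pattern, using a hypothetical MY_ASSERT macro:

    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_log.h>

    /* Hypothetical macro: present only when data-path logging is
     * built at DEBUG level, otherwise compiled to nothing. */
    #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
    #define MY_ASSERT(cond, fmt, ...)                                \
            do {                                                     \
                    if (!(cond)) {                                   \
                            fprintf(stderr, "assert: " fmt "\n",     \
                                    ##__VA_ARGS__);                  \
                            abort();                                 \
                    }                                                \
            } while (0)
    #else
    #define MY_ASSERT(cond, fmt, ...) do { } while (0)
    #endif
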
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 22db8951..34b2a8d7 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -32,7 +32,7 @@
*/
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
@@ -164,6 +164,14 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
+#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
+ DEV_TX_OFFLOAD_UDP_CKSUM |\
+ DEV_TX_OFFLOAD_IPV4_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_TSO)
+#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
+ PKT_TX_IP_CKSUM |\
+ PKT_TX_TCP_SEG)
+
/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
@@ -178,6 +186,9 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+int ena_logtype_init;
+int ena_logtype_driver;
+
static const struct rte_pci_id pci_id_ena_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
@@ -227,6 +238,10 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
+static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
+ uint64_t offloads);
+static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
+ uint64_t offloads);
static const struct eth_dev_ops ena_dev_ops = {
.dev_configure = ena_dev_configure,
@@ -260,16 +275,17 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
struct ena_com_rx_ctx *ena_rx_ctx)
{
uint64_t ol_flags = 0;
+ uint32_t packet_type = 0;
if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
- ol_flags |= PKT_TX_TCP_CKSUM;
+ packet_type |= RTE_PTYPE_L4_TCP;
else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
- ol_flags |= PKT_TX_UDP_CKSUM;
+ packet_type |= RTE_PTYPE_L4_UDP;
if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
- ol_flags |= PKT_TX_IPV4;
+ packet_type |= RTE_PTYPE_L3_IPV4;
else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
- ol_flags |= PKT_TX_IPV6;
+ packet_type |= RTE_PTYPE_L3_IPV6;
if (unlikely(ena_rx_ctx->l4_csum_err))
ol_flags |= PKT_RX_L4_CKSUM_BAD;
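
The RX path above now records what it parsed as RTE_PTYPE_* classification bits (stored into mbuf->packet_type in the following hunk) and reserves ol_flags for checksum verdicts, instead of overloading transmit-side PKT_TX_* flags. A minimal consumer sketch, assuming a hypothetical handle_rx() dispatch:

    #include <rte_mbuf.h>

    /* Hypothetical RX dispatch: classify on packet_type, consult
     * ol_flags only for checksum status. */
    static void handle_rx(struct rte_mbuf *m)
    {
            if (m->ol_flags & (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)) {
                    rte_pktmbuf_free(m); /* drop packets with bad checksums */
                    return;
            }
            if ((m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
                    /* TCP path */
            } else if ((m->packet_type & RTE_PTYPE_L4_MASK) ==
                       RTE_PTYPE_L4_UDP) {
                    /* UDP path */
            }
    }
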
@@ -277,24 +293,28 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
ol_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf->ol_flags = ol_flags;
+ mbuf->packet_type = packet_type;
}
static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
- struct ena_com_tx_ctx *ena_tx_ctx)
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ uint64_t queue_offloads)
{
struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
- if (mbuf->ol_flags &
- (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
+ if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
+ (queue_offloads & QUEUE_OFFLOADS)) {
/* check if TSO is required */
- if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
+ if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
+ (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
ena_tx_ctx->tso_enable = true;
ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
}
/* check if L3 checksum is needed */
- if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
+ if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
+ (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
ena_tx_ctx->l3_csum_enable = true;
if (mbuf->ol_flags & PKT_TX_IPV6) {
@@ -310,19 +330,17 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
}
/* check if L4 checksum is needed */
- switch (mbuf->ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
+ (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
ena_tx_ctx->l4_csum_enable = true;
- break;
- case PKT_TX_UDP_CKSUM:
+ } else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
+ (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
ena_tx_ctx->l4_csum_enable = true;
- break;
- default:
+ } else {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
ena_tx_ctx->l4_csum_enable = false;
- break;
}
ena_meta->mss = mbuf->tso_segsz;
@@ -755,7 +773,8 @@ static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
uint32_t max_frame_len = adapter->max_mtu;
- if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1)
+ if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME)
max_frame_len =
adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;
@@ -945,7 +964,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
__rte_unused unsigned int socket_id,
- __rte_unused const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf)
{
struct ena_com_create_io_ctx ctx =
/* policy set to _HOST just to satisfy icc compiler */
@@ -982,6 +1001,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
+ if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
+ !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
+ RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
+ return -EINVAL;
+ }
+
ena_qid = ENA_IO_TXQ_IDX(queue_idx);
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1036,6 +1061,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
for (i = 0; i < txq->ring_size; i++)
txq->empty_tx_reqs[i] = i;
+ txq->offloads = tx_conf->offloads;
+
/* Store pointer to this queue in upper layer */
txq->configured = 1;
dev->data->tx_queues[queue_idx] = txq;
@@ -1047,7 +1074,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
__rte_unused unsigned int socket_id,
- __rte_unused const struct rte_eth_rxconf *rx_conf,
+ const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct ena_com_create_io_ctx ctx =
@@ -1083,6 +1110,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
+ if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
+ RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
+ return -EINVAL;
+ }
+
ena_qid = ENA_IO_RXQ_IDX(queue_idx);
ctx.qid = ena_qid;
@@ -1386,6 +1418,22 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
{
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+
+ if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
+ RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+ tx_offloads, adapter->tx_supported_offloads);
+ return -ENOTSUP;
+ }
+
+ if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+ rx_offloads, adapter->rx_supported_offloads);
+ return -ENOTSUP;
+ }
if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
@@ -1407,6 +1455,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
break;
}
+ adapter->tx_selected_offloads = tx_offloads;
+ adapter->rx_selected_offloads = rx_offloads;
return 0;
}
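
Both rejections above are the same subset test: configuration is accepted only when every requested offload bit also appears in the supported mask. Stated as a hypothetical helper:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: true when 'requested' is a subset of
     * 'supported', i.e. no unsupported bit is set. */
    static inline bool offloads_subset(uint64_t requested, uint64_t supported)
    {
            return (requested & supported) == requested;
    }

The per-queue checks added later in this patch (ena_are_tx_queue_offloads_allowed and its Rx counterpart) apply the identical test against the port-level selection.
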
@@ -1435,13 +1485,39 @@ static void ena_init_rings(struct ena_adapter *adapter)
}
}
+static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
+ uint64_t offloads)
+{
+ uint64_t port_offloads = adapter->tx_selected_offloads;
+
+ /* Check if port supports all requested offloads.
+ * True if all offloads selected for queue are set for port.
+ */
+ if ((offloads & port_offloads) != offloads)
+ return false;
+ return true;
+}
+
+static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
+ uint64_t offloads)
+{
+ uint64_t port_offloads = adapter->rx_selected_offloads;
+
+ /* Check if port supports all requested offloads.
+ * True if all offloads selected for queue are set for port.
+ */
+ if ((offloads & port_offloads) != offloads)
+ return false;
+ return true;
+}
+
static void ena_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct ena_adapter *adapter;
struct ena_com_dev *ena_dev;
struct ena_com_dev_get_features_ctx feat;
- uint32_t rx_feat = 0, tx_feat = 0;
+ uint64_t rx_feat = 0, tx_feat = 0;
int rc = 0;
ena_assert_msg(dev->data != NULL, "Uninitialized device");
@@ -1487,9 +1563,13 @@ static void ena_infos_get(struct rte_eth_dev *dev,
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
+ rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
/* Inform framework about available features */
dev_info->rx_offload_capa = rx_feat;
+ dev_info->rx_queue_offload_capa = rx_feat;
dev_info->tx_offload_capa = tx_feat;
+ dev_info->tx_queue_offload_capa = tx_feat;
dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
dev_info->max_rx_pktlen = adapter->max_mtu;
@@ -1498,6 +1578,9 @@ static void ena_infos_get(struct rte_eth_dev *dev,
dev_info->max_rx_queues = adapter->num_queues;
dev_info->max_tx_queues = adapter->num_queues;
dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
+
+ adapter->tx_supported_offloads = tx_feat;
+ adapter->rx_supported_offloads = rx_feat;
}
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1714,7 +1797,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
} /* there's no else as we take advantage of memset zeroing */
/* Set TX offloads flags, if applicable */
- ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx);
+ ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);
if (unlikely(mbuf->ol_flags &
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
@@ -1814,3 +1897,15 @@ static struct rte_pci_driver rte_ena_pmd = {
RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(ena_init_log);
+static void
+ena_init_log(void)
+{
+ ena_logtype_init = rte_log_register("pmd.net.ena.init");
+ if (ena_logtype_init >= 0)
+ rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
+ ena_logtype_driver = rte_log_register("pmd.net.ena.driver");
+ if (ena_logtype_driver >= 0)
+ rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
+}
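
The constructor above is the 18.02-era dynamic-logtype idiom: RTE_INIT declares a function that runs before main(), registering a named logtype and giving it a default level. A sketch of the same pattern for an arbitrary PMD, with hypothetical names throughout:

    #include <rte_log.h>

    int mydrv_logtype; /* hypothetical PMD-wide logtype id */

    RTE_INIT(mydrv_init_log);
    static void
    mydrv_init_log(void)
    {
            mydrv_logtype = rte_log_register("pmd.net.mydrv");
            if (mydrv_logtype >= 0)
                    rte_log_set_level(mydrv_logtype, RTE_LOG_NOTICE);
    }
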
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index be8bc9fa..394d05e0 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -91,6 +91,7 @@ struct ena_ring {
uint8_t tx_max_header_size;
int configured;
struct ena_adapter *adapter;
+ uint64_t offloads;
} __rte_cache_aligned;
enum ena_adapter_state {
@@ -175,6 +176,10 @@ struct ena_adapter {
struct ena_driver_stats *drv_stats;
enum ena_adapter_state state;
+ uint64_t tx_supported_offloads;
+ uint64_t tx_selected_offloads;
+ uint64_t rx_supported_offloads;
+ uint64_t rx_selected_offloads;
};
#endif /* _ENA_ETHDEV_H_ */
diff --git a/drivers/net/ena/ena_logs.h b/drivers/net/ena/ena_logs.h
index c6c8a41b..2c0e91b6 100644
--- a/drivers/net/ena/ena_logs.h
+++ b/drivers/net/ena/ena_logs.h
@@ -34,10 +34,10 @@
#ifndef _ENA_LOGS_H_
#define _ENA_LOGS_H_
-#define RTE_LOGTYPE_ENA RTE_LOGTYPE_USER1
-
+extern int ena_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+ rte_log(RTE_LOG_ ## level, ena_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
#ifdef RTE_LIBRTE_ENA_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -60,11 +60,9 @@
#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
#endif
-#ifdef RTE_LIBRTE_ENA_DEBUG_DRIVER
+extern int ena_logtype_driver;
#define PMD_DRV_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, ena_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
#endif /* _ENA_LOGS_H_ */
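
With ena_logtype_driver always compiled in, PMD_DRV_LOG lines become runtime-filterable instead of vanishing unless RTE_LIBRTE_ENA_DEBUG_DRIVER was set at build time. A usage sketch with a hypothetical call site:

    /* Hypothetical call site inside the driver: */
    PMD_DRV_LOG(ERR, "queue %u setup failed", queue_idx);
    /*
     * expands to:
     * rte_log(RTE_LOG_ERR, ena_logtype_driver,
     *         "%s(): queue %u setup failed\n", __func__, queue_idx);
     */
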
diff --git a/drivers/net/ena/ena_platform.h b/drivers/net/ena/ena_platform.h
index 0df82d6f..a2239a92 100644
--- a/drivers/net/ena/ena_platform.h
+++ b/drivers/net/ena/ena_platform.h
@@ -49,7 +49,7 @@
#define ena_assert_msg(cond, msg) \
do { \
if (unlikely(!(cond))) { \
- RTE_LOG(ERR, ENA, \
+ rte_log(RTE_LOG_ERR, ena_logtype_driver, \
"Assert failed on %s:%s:%d: ", \
__FILE__, __func__, __LINE__); \
rte_panic(msg); \
diff --git a/drivers/net/enic/LICENSE b/drivers/net/enic/LICENSE
deleted file mode 100644
index 46a27a4e..00000000
--- a/drivers/net/enic/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 5191db54..7c6c29cc 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,34 +1,6 @@
-#
-# Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
# Copyright 2007 Nuova Systems, Inc. All rights reserved.
-#
-# Copyright (c) 2014, Cisco Systems, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/enic/base/cq_desc.h b/drivers/net/enic/base/cq_desc.h
index f3ef6bb5..7e138027 100644
--- a/drivers/net/enic/base/cq_desc.h
+++ b/drivers/net/enic/base/cq_desc.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _CQ_DESC_H_
diff --git a/drivers/net/enic/base/cq_enet_desc.h b/drivers/net/enic/base/cq_enet_desc.h
index e8410563..5ced63cb 100644
--- a/drivers/net/enic/base/cq_enet_desc.h
+++ b/drivers/net/enic/base/cq_enet_desc.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _CQ_ENET_DESC_H_
diff --git a/drivers/net/enic/base/rq_enet_desc.h b/drivers/net/enic/base/rq_enet_desc.h
index 13e24b43..3585bf3a 100644
--- a/drivers/net/enic/base/rq_enet_desc.h
+++ b/drivers/net/enic/base/rq_enet_desc.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _RQ_ENET_DESC_H_
diff --git a/drivers/net/enic/base/vnic_cq.c b/drivers/net/enic/base/vnic_cq.c
index 3549fece..63a503f2 100644
--- a/drivers/net/enic/base/vnic_cq.c
+++ b/drivers/net/enic/base/vnic_cq.c
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include "vnic_dev.h"
diff --git a/drivers/net/enic/base/vnic_cq.h b/drivers/net/enic/base/vnic_cq.h
index 13ab87ca..432219af 100644
--- a/drivers/net/enic/base/vnic_cq.h
+++ b/drivers/net/enic/base/vnic_cq.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _VNIC_CQ_H_
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 9b25d219..05b595eb 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <rte_memzone.h>
@@ -78,7 +49,6 @@ struct vnic_dev {
enum vnic_proxy_type proxy;
u32 proxy_index;
u64 args[VNIC_DEVCMD_NARGS];
- u16 split_hdr_size;
int in_reset;
struct vnic_intr_coal_timer_info intr_coal_timer_info;
void *(*alloc_consistent)(void *priv, size_t size,
@@ -251,16 +221,6 @@ unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
return ring->size_unaligned;
}
-void vnic_set_hdr_split_size(struct vnic_dev *vdev, u16 size)
-{
- vdev->split_hdr_size = size;
-}
-
-u16 vnic_get_hdr_split_size(struct vnic_dev *vdev)
-{
- return vdev->split_hdr_size;
-}
-
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
memset(ring->descs, 0, ring->size);
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index c9ca25b3..8c099206 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _VNIC_DEV_H_
@@ -120,8 +91,6 @@ unsigned long vnic_dev_get_res_type_len(struct vnic_dev *vdev,
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size);
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
-void vnic_set_hdr_split_size(struct vnic_dev *vdev, u16 size);
-u16 vnic_get_hdr_split_size(struct vnic_dev *vdev);
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size, unsigned int socket_id,
char *z_name);
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index 05d87b91..6b95bc48 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _VNIC_DEVCMD_H_
diff --git a/drivers/net/enic/base/vnic_enet.h b/drivers/net/enic/base/vnic_enet.h
index 50622479..26918335 100644
--- a/drivers/net/enic/base/vnic_enet.h
+++ b/drivers/net/enic/base/vnic_enet.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _VNIC_ENIC_H_
diff --git a/drivers/net/enic/base/vnic_intr.c b/drivers/net/enic/base/vnic_intr.c
index 04bb4261..4487c7c4 100644
--- a/drivers/net/enic/base/vnic_intr.c
+++ b/drivers/net/enic/base/vnic_intr.c
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include "vnic_dev.h"
diff --git a/drivers/net/enic/base/vnic_intr.h b/drivers/net/enic/base/vnic_intr.h
index da089bcf..13637385 100644
--- a/drivers/net/enic/base/vnic_intr.h
+++ b/drivers/net/enic/base/vnic_intr.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _VNIC_INTR_H_
diff --git a/drivers/net/enic/base/vnic_nic.h b/drivers/net/enic/base/vnic_nic.h
index 88907c00..a753b3a5 100644
--- a/drivers/net/enic/base/vnic_nic.h
+++ b/drivers/net/enic/base/vnic_nic.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _VNIC_NIC_H_
diff --git a/drivers/net/enic/base/vnic_resource.h b/drivers/net/enic/base/vnic_resource.h
index b7a9b612..184bfa74 100644
--- a/drivers/net/enic/base/vnic_resource.h
+++ b/drivers/net/enic/base/vnic_resource.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _VNIC_RESOURCE_H_
diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c
index ea297eef..72fcef55 100644
--- a/drivers/net/enic/base/vnic_rq.c
+++ b/drivers/net/enic/base/vnic_rq.c
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include "vnic_dev.h"
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index f3fd39f7..d774bb0d 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _VNIC_RQ_H_
diff --git a/drivers/net/enic/base/vnic_rss.c b/drivers/net/enic/base/vnic_rss.c
index 87d40c0d..f41b8660 100644
--- a/drivers/net/enic/base/vnic_rss.c
+++ b/drivers/net/enic/base/vnic_rss.c
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include "enic_compat.h"
diff --git a/drivers/net/enic/base/vnic_rss.h b/drivers/net/enic/base/vnic_rss.h
index ebb18b59..abd7b9f1 100644
--- a/drivers/net/enic/base/vnic_rss.h
+++ b/drivers/net/enic/base/vnic_rss.h
@@ -1,34 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _VNIC_RSS_H_
diff --git a/drivers/net/enic/base/vnic_stats.h b/drivers/net/enic/base/vnic_stats.h
index 0c779d8a..49429cc2 100644
--- a/drivers/net/enic/base/vnic_stats.h
+++ b/drivers/net/enic/base/vnic_stats.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _VNIC_STATS_H_
diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
index 0a1247f4..d61c4c6e 100644
--- a/drivers/net/enic/base/vnic_wq.c
+++ b/drivers/net/enic/base/vnic_wq.c
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include "vnic_dev.h"
diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
index 38a217f1..7c069c06 100644
--- a/drivers/net/enic/base/vnic_wq.h
+++ b/drivers/net/enic/base/vnic_wq.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _VNIC_WQ_H_
diff --git a/drivers/net/enic/base/wq_enet_desc.h b/drivers/net/enic/base/wq_enet_desc.h
index db41d00e..cdf22fff 100644
--- a/drivers/net/enic/base/wq_enet_desc.h
+++ b/drivers/net/enic/base/wq_enet_desc.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _WQ_ENET_DESC_H_
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e36ec385..c083985e 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _ENIC_H_
@@ -53,13 +24,6 @@
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
#define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc"
-#define ENIC_WQ_MAX 8
-/* With Rx scatter support, we use two RQs on VIC per RQ used by app. Both
- * RQs use the same CQ.
- */
-#define ENIC_RQ_MAX 16
-#define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))
-#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
#define ENIC_MAX_MAC_ADDR 64
#define VLAN_ETH_HLEN 18
@@ -150,17 +114,17 @@ struct enic {
unsigned int flags;
unsigned int priv_flags;
- /* work queue */
- struct vnic_wq wq[ENIC_WQ_MAX];
- unsigned int wq_count;
+ /* work queue (len = conf_wq_count) */
+ struct vnic_wq *wq;
+ unsigned int wq_count; /* equals eth_dev nb_tx_queues */
- /* receive queue */
- struct vnic_rq rq[ENIC_RQ_MAX];
- unsigned int rq_count;
+ /* receive queue (len = conf_rq_count) */
+ struct vnic_rq *rq;
+ unsigned int rq_count; /* equals eth_dev nb_rx_queues */
- /* completion queue */
- struct vnic_cq cq[ENIC_CQ_MAX];
- unsigned int cq_count;
+ /* completion queue (len = conf_cq_count) */
+ struct vnic_cq *cq;
+ unsigned int cq_count; /* equals rq_count + wq_count */
/* interrupt resource */
struct vnic_intr intr;
@@ -277,7 +241,6 @@ extern int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
uint16_t nb_desc, uint16_t free_thresh);
extern int enic_set_rss_nic_cfg(struct enic *enic);
extern int enic_set_vnic_res(struct enic *enic);
-extern void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size);
extern int enic_enable(struct enic *enic);
extern int enic_disable(struct enic *enic);
extern void enic_remove(struct enic *enic);
@@ -305,6 +268,8 @@ uint16_t enic_dummy_recv_pkts(void *rx_queue,
uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
int enic_link_update(struct enic *enic);
void enic_fdir_info(struct enic *enic);
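
Note on the enic.h hunk above: the compile-time ENIC_WQ_MAX/ENIC_RQ_MAX/ENIC_CQ_MAX limits are gone, and the fixed-size queue arrays become pointers sized from the vNIC's configured resource counts. A minimal application-side sketch of the consequence, assuming a probed port (the helper name and output format are illustrative, not part of this patch):

#include <stdio.h>
#include <rte_ethdev.h>

/* Queue limits now come from the vNIC configuration rather than
 * compile-time maxima; read them through the ethdev info query.
 */
static void
print_queue_limits(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	printf("port %u: max_rx_queues=%u max_tx_queues=%u\n",
	       port_id, info.max_rx_queues, info.max_tx_queues);
}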
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 9b461424..3ef1d083 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -1,40 +1,11 @@
-/*
- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <libgen.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
diff --git a/drivers/net/enic/enic_compat.h b/drivers/net/enic/enic_compat.h
index 1cb5686f..c0af1ed2 100644
--- a/drivers/net/enic/enic_compat.h
+++ b/drivers/net/enic/enic_compat.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _ENIC_COMPAT_H_
@@ -84,6 +55,8 @@
#define dev_warning(x, args...) dev_printk(WARNING, args)
#define dev_debug(x, args...) dev_printk(DEBUG, args)
+extern int enicpmd_logtype_flow;
+
#define __le16 u16
#define __le32 u32
#define __le64 u64
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 669dbf33..d84714ef 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <stdio.h>
@@ -38,7 +9,7 @@
#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>
@@ -49,12 +20,14 @@
#include "vnic_enet.h"
#include "enic.h"
-#ifdef RTE_LIBRTE_ENIC_DEBUG
-#define ENICPMD_FUNC_TRACE() \
- RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__)
-#else
-#define ENICPMD_FUNC_TRACE() (void)0
-#endif
+int enicpmd_logtype_init;
+int enicpmd_logtype_flow;
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
+ "%s" fmt "\n", __func__, ##args)
+
+#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
/*
* The set of PCI devices this driver supports
@@ -66,6 +39,18 @@ static const struct rte_pci_id pci_id_enic_map[] = {
{.vendor_id = 0, /* sentinel */},
};
+RTE_INIT(enicpmd_init_log);
+static void
+enicpmd_init_log(void)
+{
+ enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
+ if (enicpmd_logtype_init >= 0)
+ rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
+ enicpmd_logtype_flow = rte_log_register("pmd.net.enic.flow");
+ if (enicpmd_logtype_flow >= 0)
+ rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
+}
+
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
enum rte_filter_op filter_op, void *arg)
@@ -205,13 +190,7 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
return -E_RTE_SECONDARY;
ENICPMD_FUNC_TRACE();
- if (queue_idx >= ENIC_WQ_MAX) {
- dev_err(enic,
- "Max number of TX queues exceeded. Max is %d\n",
- ENIC_WQ_MAX);
- return -EINVAL;
- }
-
+ RTE_ASSERT(queue_idx < enic->conf_wq_count);
eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
@@ -325,17 +304,7 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -E_RTE_SECONDARY;
-
- /* With Rx scatter support, two RQs are now used on VIC per RQ used
- * by the application.
- */
- if (queue_idx * 2 >= ENIC_RQ_MAX) {
- dev_err(enic,
- "Max number of RX queues exceeded. Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
- ENIC_RQ_MAX);
- return -EINVAL;
- }
-
+ RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
eth_dev->data->rx_queues[queue_idx] =
(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
@@ -370,7 +339,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
ENICPMD_FUNC_TRACE();
if (mask & ETH_VLAN_STRIP_MASK) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP)
enic->ig_vlan_strip_en = 1;
else
enic->ig_vlan_strip_en = 0;
@@ -406,14 +376,8 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
return ret;
}
- if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
- eth_dev->data->dev_conf.rxmode.header_split) {
- /* Enable header-data-split */
- enic_set_hdr_split_size(enic,
- eth_dev->data->dev_conf.rxmode.split_hdr_size);
- }
-
- enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
+ enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CHECKSUM);
ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
return ret;
@@ -677,6 +641,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &enicpmd_eth_dev_ops;
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &enic_prep_pkts;
pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pdev);
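
Note on the logging change above: the compile-time RTE_LIBRTE_ENIC_DEBUG trace macro is replaced with dynamic log types registered from a constructor. A minimal sketch of the same pattern for another component (all names are illustrative); the level can then be raised at run time through the EAL --log-level option instead of rebuilding the driver:

#include <rte_log.h>

static int my_logtype;

RTE_INIT(my_init_log);
static void
my_init_log(void)
{
	/* Default to NOTICE; raise to DEBUG at run time when needed. */
	my_logtype = rte_log_register("pmd.net.example.init");
	if (my_logtype >= 0)
		rte_log_set_level(my_logtype, RTE_LOG_NOTICE);
}

#define MY_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, my_logtype, fmt "\n", ##args)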
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index a728d077..28923b0e 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -1,37 +1,10 @@
-/*
- * Copyright (c) 2017, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
*/
#include <errno.h>
#include <rte_log.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
@@ -42,15 +15,12 @@
#include "vnic_dev.h"
#include "vnic_nic.h"
-#ifdef RTE_LIBRTE_ENIC_DEBUG_FLOW
#define FLOW_TRACE() \
- RTE_LOG(DEBUG, PMD, "%s()\n", __func__)
+ rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
+ "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, fmt, ## args)
-#else
-#define FLOW_TRACE() do { } while (0)
-#define FLOW_LOG(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
+ fmt "\n", ##args)
/** Info about how to copy items into enic filters. */
struct enic_items {
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 8af0ccd3..ec9d343f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <stdio.h>
@@ -45,7 +16,7 @@
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "enic_compat.h"
#include "enic.h"
@@ -98,11 +69,6 @@ enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
}
}
-void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
-{
- vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
-}
-
static void enic_free_wq_buf(struct vnic_wq_buf *buf)
{
struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
@@ -434,7 +400,7 @@ enic_intr_handler(void *arg)
vnic_intr_return_all_credits(&enic->intr);
enic_link_update(enic);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
enic_log_q_error(enic);
}
@@ -634,7 +600,8 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM);
- if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+ if (enic->rte_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SCATTER) {
dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
/* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
@@ -1075,6 +1042,9 @@ static void enic_dev_deinit(struct enic *enic)
vnic_dev_notify_unset(enic->vdev);
rte_free(eth_dev->data->mac_addrs);
+ rte_free(enic->cq);
+ rte_free(enic->rq);
+ rte_free(enic->wq);
}
@@ -1082,27 +1052,28 @@ int enic_set_vnic_res(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
int rc = 0;
+ unsigned int required_rq, required_wq, required_cq;
- /* With Rx scatter support, two RQs are now used per RQ used by
- * the application.
- */
- if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
+ /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
+ required_rq = eth_dev->data->nb_rx_queues * 2;
+ required_wq = eth_dev->data->nb_tx_queues;
+ required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+
+ if (enic->conf_rq_count < required_rq) {
dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
eth_dev->data->nb_rx_queues,
- eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
+ required_rq, enic->conf_rq_count);
rc = -EINVAL;
}
- if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
+ if (enic->conf_wq_count < required_wq) {
dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
eth_dev->data->nb_tx_queues, enic->conf_wq_count);
rc = -EINVAL;
}
- if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
- eth_dev->data->nb_tx_queues)) {
+ if (enic->conf_cq_count < required_cq) {
dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
- (eth_dev->data->nb_rx_queues +
- eth_dev->data->nb_tx_queues), enic->conf_cq_count);
+ required_cq, enic->conf_cq_count);
rc = -EINVAL;
}
@@ -1208,7 +1179,8 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
/* The easy case is when scatter is disabled. However if the MTU
* becomes greater than the mbuf data size, packet drops will ensue.
*/
- if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+ if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SCATTER)) {
eth_dev->data->mtu = new_mtu;
goto set_mtu_done;
}
@@ -1307,6 +1279,25 @@ static int enic_dev_init(struct enic *enic)
dev_err(enic, "See the ENIC PMD guide for more information.\n");
return -EINVAL;
}
+ /* Queue counts may be zero. rte_zmalloc returns NULL in that case. */
+ enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
+ enic->conf_cq_count, 8);
+ enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
+ enic->conf_rq_count, 8);
+ enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
+ enic->conf_wq_count, 8);
+ if (enic->conf_cq_count > 0 && enic->cq == NULL) {
+ dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_rq_count > 0 && enic->rq == NULL) {
+ dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_wq_count > 0 && enic->wq == NULL) {
+ dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
+ return -1;
+ }
/* Get the supported filters */
enic_fdir_info(enic);
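
Note on the enic_set_vnic_res() hunk above: it makes the sizing rule explicit — every eth_dev Rx queue consumes two VIC RQs (start-of-packet plus data), each Tx queue one WQ, and one CQ is needed per Rx queue and per Tx queue. A self-contained sketch of that arithmetic (struct and function names are illustrative):

#include <stdint.h>

struct vic_res_req {
	unsigned int rq;
	unsigned int wq;
	unsigned int cq;
};

/* Mirrors the required_rq/required_wq/required_cq computation in
 * enic_set_vnic_res(): 2 RQs per Rx queue, 1 WQ per Tx queue, and
 * one CQ per Rx queue plus one per Tx queue.
 */
static struct vic_res_req
vic_required(uint16_t nb_rx_queues, uint16_t nb_tx_queues)
{
	struct vic_res_req req;

	req.rq = (unsigned int)nb_rx_queues * 2;
	req.wq = nb_tx_queues;
	req.cq = (unsigned int)nb_rx_queues + nb_tx_queues;
	return req;
}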
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index e4b80d49..c99d6183 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -1,39 +1,10 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include "enic_compat.h"
-#include "rte_ethdev.h"
+#include "rte_ethdev_driver.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 1135d2e1..cf3a6fde 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -1,35 +1,6 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _ENIC_RES_H_
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index a3663d51..2fe5a3fa 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -1,37 +1,11 @@
-/* Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_net.h>
#include <rte_prefetch.h>
#include "enic_compat.h"
@@ -41,6 +15,15 @@
#include <rte_ip.h>
#include <rte_tcp.h>
+#define ENIC_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define ENIC_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK)
+
#define RTE_PMD_USE_PREFETCH
#ifdef RTE_PMD_USE_PREFETCH
@@ -132,59 +115,6 @@ enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
-/* Find the offset to L5. This is needed by enic TSO implementation.
- * Return 0 if not a TCP packet or can't figure out the length.
- */
-static inline uint8_t tso_header_len(struct rte_mbuf *mbuf)
-{
- struct ether_hdr *eh;
- struct vlan_hdr *vh;
- struct ipv4_hdr *ip4;
- struct ipv6_hdr *ip6;
- struct tcp_hdr *th;
- uint8_t hdr_len;
- uint16_t ether_type;
-
- /* offset past Ethernet header */
- eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- ether_type = eh->ether_type;
- hdr_len = sizeof(struct ether_hdr);
- if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
- vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
- ether_type = vh->eth_proto;
- hdr_len += sizeof(struct vlan_hdr);
- }
-
- /* offset past IP header */
- switch (rte_be_to_cpu_16(ether_type)) {
- case ETHER_TYPE_IPv4:
- ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
- if (ip4->next_proto_id != IPPROTO_TCP)
- return 0;
- hdr_len += (ip4->version_ihl & 0xf) * 4;
- break;
- case ETHER_TYPE_IPv6:
- ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
- if (ip6->proto != IPPROTO_TCP)
- return 0;
- hdr_len += sizeof(struct ipv6_hdr);
- break;
- default:
- return 0;
- }
-
- if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
- return 0;
-
- /* offset past TCP header */
- th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
- hdr_len += (th->data_off >> 4) * 4;
-
- if (hdr_len > mbuf->pkt_len)
- return 0;
-
- return hdr_len;
-}
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
@@ -230,17 +160,10 @@ static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;
- ciflags = enic_cq_rx_desc_ciflags(cqrd);
+ uint16_t bwflags, pkt_flags = 0, vlan_tci;
bwflags = enic_cq_rx_desc_bwflags(cqrd);
vlan_tci = enic_cq_rx_desc_vlan(cqrd);
- mbuf->ol_flags = 0;
-
- /* flags are meaningless if !EOP */
- if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
- goto mbuf_flags_done;
-
/* VLAN STRIPPED flag. The L2 packet type updated here also */
if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
@@ -272,20 +195,18 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
}
/* checksum flags */
- if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
- if (enic_cq_rx_desc_csum_not_calc(cqrd))
- pkt_flags |= (PKT_RX_IP_CKSUM_UNKNOWN &
- PKT_RX_L4_CKSUM_UNKNOWN);
- else {
+ if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
+ if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
uint32_t l4_flags;
l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
- else
+ else if (mbuf->packet_type & RTE_PTYPE_L3_IPV4)
pkt_flags |= PKT_RX_IP_CKSUM_BAD;
- if (l4_flags & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
+ if (l4_flags == RTE_PTYPE_L4_UDP ||
+ l4_flags == RTE_PTYPE_L4_TCP) {
if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
else
@@ -294,7 +215,6 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
}
}
- mbuf_flags_done:
mbuf->ol_flags = pkt_flags;
}
@@ -337,7 +257,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
while (nb_rx < nb_pkts) {
volatile struct rq_enet_desc *rqd_ptr;
- dma_addr_t dma_addr;
struct cq_desc cqd;
uint8_t packet_error;
uint16_t ciflags;
@@ -386,12 +305,13 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Push descriptor for newly allocated mbuf */
nmb->data_off = RTE_PKTMBUF_HEADROOM;
- dma_addr = (dma_addr_t)(nmb->buf_iova +
- RTE_PKTMBUF_HEADROOM);
- rq_enet_desc_enc(rqd_ptr, dma_addr,
- (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
- : RQ_ENET_TYPE_NOT_SOP),
- nmb->buf_len - RTE_PKTMBUF_HEADROOM);
+ /*
+ * Only the address needs to be refilled. length_type of the
+ * descriptor is set during initialization
+ * (enic_alloc_rx_queue_mbufs) and does not change.
+ */
+ rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
+ RTE_PKTMBUF_HEADROOM);
/* Fill in the rest of the mbuf */
seg_length = enic_cq_rx_desc_n_bytes(&cqd);
@@ -523,6 +443,38 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
return 0;
}
+uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint16_t i;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+ if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -580,8 +532,8 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
mss = 0;
- vlan_id = 0;
- vlan_tag_insert = 0;
+ vlan_id = tx_pkt->vlan_tci;
+ vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
bus_addr = (dma_addr_t)
(tx_pkt->buf_iova + tx_pkt->data_off);
@@ -593,7 +545,8 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
header_len = 0;
if (tso) {
- header_len = tso_header_len(tx_pkt);
+ header_len = tx_pkt->l2_len + tx_pkt->l3_len +
+ tx_pkt->l4_len;
/* Drop if non-TCP packet or TSO seg size is too big */
if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
@@ -620,10 +573,6 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
}
- if (ol_flags & PKT_TX_VLAN_PKT) {
- vlan_tag_insert = 1;
- vlan_id = tx_pkt->vlan_tci;
- }
wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
offload_mode, eop, eop, 0, vlan_tag_insert,
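
Note on the new enic_prep_pkts() above: with tx_pkt_prepare wired up in eth_enicpmd_dev_init(), applications can have unsupported offload flags rejected and Intel-style pseudo-header checksums filled in before transmission. A minimal usage sketch (the wrapper function name is illustrative):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t n)
{
	/* enic_prep_pkts() runs behind this call; on failure it stops
	 * at the offending mbuf and sets rte_errno.
	 */
	uint16_t ready = rte_eth_tx_prepare(port_id, queue_id, pkts, n);

	return rte_eth_tx_burst(port_id, queue_id, pkts, ready);
}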
diff --git a/drivers/net/failsafe/Makefile b/drivers/net/failsafe/Makefile
index ea2a8fe4..bd2f0198 100644
--- a/drivers/net/failsafe/Makefile
+++ b/drivers/net/failsafe/Makefile
@@ -46,10 +46,17 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ops.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_ether.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_FAILSAFE) += failsafe_intr.c
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
+CFLAGS += -DLINUX
+else
+CFLAGS += -DBSD
+endif
# No exported include files
# Basic CFLAGS:
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -std=gnu99 -Wextra
CFLAGS += -O3
CFLAGS += -I.
@@ -61,5 +68,6 @@ CFLAGS += -pedantic
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_vdev
+LDLIBS += -lpthread
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index 6bc5abac..c499bfb9 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -1,39 +1,11 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#include <rte_alarm.h>
#include <rte_malloc.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
@@ -46,7 +18,7 @@ static const struct rte_eth_link eth_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_UP,
- .link_autoneg = ETH_LINK_SPEED_AUTONEG,
+ .link_autoneg = ETH_LINK_AUTONEG,
};
static int
@@ -55,6 +27,7 @@ fs_sub_device_alloc(struct rte_eth_dev *dev,
{
uint8_t nb_subs;
int ret;
+ int i;
ret = failsafe_args_count_subdevice(dev, params);
if (ret)
@@ -72,6 +45,10 @@ fs_sub_device_alloc(struct rte_eth_dev *dev,
ERROR("Could not allocate sub_devices");
return -ENOMEM;
}
+ /* Initialize the static sub-devices circular linked list. */
+ for (i = 1; i < nb_subs; i++)
+ PRIV(dev)->subs[i - 1].next = PRIV(dev)->subs + i;
+ PRIV(dev)->subs[i - 1].next = PRIV(dev)->subs;
return 0;
}
@@ -108,16 +85,14 @@ failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev)
{
int ret = 0;
- if (PRIV(dev)->pending_alarm) {
- rte_errno = 0;
- rte_eal_alarm_cancel(fs_hotplug_alarm, dev);
- if (rte_errno) {
- ERROR("rte_eal_alarm_cancel failed (errno: %s)",
- strerror(rte_errno));
- ret = -rte_errno;
- } else {
- PRIV(dev)->pending_alarm = 0;
- }
+ rte_errno = 0;
+ rte_eal_alarm_cancel(fs_hotplug_alarm, dev);
+ if (rte_errno) {
+ ERROR("rte_eal_alarm_cancel failed (errno: %s)",
+ strerror(rte_errno));
+ ret = -rte_errno;
+ } else {
+ PRIV(dev)->pending_alarm = 0;
}
return ret;
}
@@ -138,17 +113,46 @@ fs_hotplug_alarm(void *arg)
break;
/* if we have non-probed device */
if (i != PRIV(dev)->subs_tail) {
+ if (fs_lock(dev, 1) != 0)
+ goto reinstall;
ret = failsafe_eth_dev_state_sync(dev);
+ fs_unlock(dev, 1);
if (ret)
ERROR("Unable to synchronize sub_device state");
}
failsafe_dev_remove(dev);
+reinstall:
ret = failsafe_hotplug_alarm_install(dev);
if (ret)
ERROR("Unable to set up next alarm");
}
static int
+fs_mutex_init(struct fs_priv *priv)
+{
+ int ret;
+ pthread_mutexattr_t attr;
+
+ ret = pthread_mutexattr_init(&attr);
+ if (ret) {
+ ERROR("Cannot initiate mutex attributes - %s", strerror(ret));
+ return ret;
+ }
+ /* Allow mutex relocks for the thread holding the mutex. */
+ ret = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ if (ret) {
+ ERROR("Cannot set mutex type - %s", strerror(ret));
+ return ret;
+ }
+ ret = pthread_mutex_init(&priv->hotplug_mutex, &attr);
+ if (ret) {
+ ERROR("Cannot initiate mutex - %s", strerror(ret));
+ return ret;
+ }
+ return 0;
+}
+
+static int
fs_eth_dev_create(struct rte_vdev_device *vdev)
{
struct rte_eth_dev *dev;
@@ -191,9 +195,19 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
ret = failsafe_args_parse(dev, params);
if (ret)
goto free_subs;
+ ret = rte_eth_dev_owner_new(&priv->my_owner.id);
+ if (ret) {
+ ERROR("Failed to get unique owner identifier");
+ goto free_args;
+ }
+ snprintf(priv->my_owner.name, sizeof(priv->my_owner.name),
+ FAILSAFE_OWNER_NAME);
ret = failsafe_eal_init(dev);
if (ret)
goto free_args;
+ ret = fs_mutex_init(priv);
+ if (ret)
+ goto free_args;
ret = failsafe_hotplug_alarm_install(dev);
if (ret) {
ERROR("Could not set up plug-in event detection");
@@ -239,6 +253,10 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
mac->addr_bytes[2], mac->addr_bytes[3],
mac->addr_bytes[4], mac->addr_bytes[5]);
dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ PRIV(dev)->intr_handle = (struct rte_intr_handle){
+ .fd = -1,
+ .type = RTE_INTR_HANDLE_EXT,
+ };
return 0;
free_args:
failsafe_args_free(dev);
@@ -264,6 +282,9 @@ fs_rte_eth_free(const char *name)
ERROR("Error while uninitializing sub-EAL");
failsafe_args_free(dev);
fs_sub_device_free(dev);
+ ret = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
+ if (ret)
+ ERROR("Error while destroying hotplug mutex");
rte_free(PRIV(dev));
rte_eth_dev_release_port(dev);
return ret;
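
Note on fs_mutex_init() above: it deliberately requests PTHREAD_MUTEX_RECURSIVE so a thread already holding the hotplug lock (for example, inside the alarm callback) can re-enter locked entry points. The fs_lock()/fs_unlock() helpers themselves live in failsafe_private.h and are not part of this patch; a hedged sketch of the presumed shape, with the trylock-in-alarm-context behaviour inferred from the "fs_lock(dev, 1) != 0" checks elsewhere in the series:

#include <pthread.h>

/* Assumed shape of the helpers: alarm context must not block, so it
 * only tries the lock and retries on the next alarm if that fails.
 */
static inline int
fs_lock_sketch(pthread_mutex_t *hotplug_mutex, unsigned int is_alarm)
{
	if (is_alarm)
		return pthread_mutex_trylock(hotplug_mutex);
	return pthread_mutex_lock(hotplug_mutex);
}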
diff --git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c
index cfc83e36..366dbea1 100644
--- a/drivers/net/failsafe/failsafe_args.c
+++ b/drivers/net/failsafe/failsafe_args.c
@@ -1,37 +1,13 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
+#include <unistd.h>
#include <errno.h>
#include <rte_debug.h>
@@ -41,8 +17,6 @@
#include "failsafe_private.h"
-#define DEVARGS_MAXLEN 4096
-
/* Callback used when a new device is found in devargs */
typedef int (parse_cb)(struct rte_eth_dev *dev, const char *params,
uint8_t head);
@@ -161,6 +135,67 @@ ret_pclose:
}
static int
+fs_read_fd(struct sub_device *sdev, char *fd_str)
+{
+ FILE *fp = NULL;
+ int fd = -1;
+ /* store possible newline as well */
+ char output[DEVARGS_MAXLEN + 1];
+ int err = -ENODEV;
+ int oflags;
+ int lcount;
+
+ RTE_ASSERT(fd_str != NULL || sdev->fd_str != NULL);
+ if (sdev->fd_str == NULL) {
+ sdev->fd_str = strdup(fd_str);
+ if (sdev->fd_str == NULL) {
+ ERROR("Command line allocation failed");
+ return -ENOMEM;
+ }
+ }
+ errno = 0;
+ fd = strtol(fd_str, &fd_str, 0);
+ if (errno || *fd_str || fd < 0) {
+ ERROR("Parsing FD number failed");
+ goto error;
+ }
+ /* Operate on a duplicate of the file descriptor */
+ fd = dup(fd);
+ if (fd == -1)
+ goto error;
+ oflags = fcntl(fd, F_GETFL);
+ if (oflags == -1)
+ goto error;
+ if (fcntl(fd, F_SETFL, oflags | O_NONBLOCK) == -1)
+ goto error;
+ fp = fdopen(fd, "r");
+ if (fp == NULL)
+ goto error;
+ fd = -1;
+ /* Only take the last line into account */
+ lcount = 0;
+ while (fgets(output, sizeof(output), fp))
+ ++lcount;
+ if (lcount == 0)
+ goto error;
+ else if (ferror(fp) && errno != EAGAIN)
+ goto error;
+ /* Line must end with a newline character */
+ fs_sanitize_cmdline(output);
+ if (output[0] == '\0')
+ goto error;
+ err = fs_parse_device(sdev, output);
+ if (err)
+ ERROR("Parsing device '%s' failed", output);
+error:
+ if (fp)
+ fclose(fp);
+ if (fd != -1)
+ close(fd);
+ return err;
+}
+
+static int
fs_parse_device_param(struct rte_eth_dev *dev, const char *param,
uint8_t head)
{
@@ -202,6 +237,14 @@ fs_parse_device_param(struct rte_eth_dev *dev, const char *param,
}
if (ret)
goto free_args;
+ } else if (strncmp(param, "fd(", 3) == 0) {
+ ret = fs_read_fd(sdev, args);
+ if (ret == -ENODEV) {
+ DEBUG("Reading device info from FD failed");
+ ret = 0;
+ }
+ if (ret)
+ goto free_args;
} else {
ERROR("Unrecognized device type: %.*s", (int)b, param);
return -EINVAL;
@@ -407,8 +450,10 @@ failsafe_args_free(struct rte_eth_dev *dev)
uint8_t i;
FOREACH_SUBDEV(sdev, i, dev) {
- rte_free(sdev->cmdline);
+ free(sdev->cmdline);
sdev->cmdline = NULL;
+ free(sdev->fd_str);
+ sdev->fd_str = NULL;
free(sdev->devargs.args);
sdev->devargs.args = NULL;
}
@@ -424,7 +469,8 @@ fs_count_device(struct rte_eth_dev *dev, const char *param,
param[b] != '\0')
b++;
if (strncmp(param, "dev", b) != 0 &&
- strncmp(param, "exec", b) != 0) {
+ strncmp(param, "exec", b) != 0 &&
+ strncmp(param, "fd(", b) != 0) {
ERROR("Unrecognized device type: %.*s", (int)b, param);
return -EINVAL;
}
@@ -463,6 +509,8 @@ failsafe_args_parse_subs(struct rte_eth_dev *dev)
continue;
if (sdev->cmdline)
ret = fs_execute_cmd(sdev, sdev->cmdline);
+ else if (sdev->fd_str)
+ ret = fs_read_fd(sdev, sdev->fd_str);
else
ret = fs_parse_sub_device(sdev);
if (ret == 0)
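
Note on fs_read_fd() above: it adds a third sub-device definition syntax, fd(<number>). The PMD duplicates the descriptor, makes it non-blocking, and takes only the last complete line it can read as the device string, re-reading on later hot-plug attempts. A hypothetical launcher-side sketch feeding such a descriptor through a pipe (the descriptor handling and device string are illustrative):

#include <string.h>
#include <unistd.h>

static int
feed_failsafe_fd(int pipe_wr)
{
	/* Only the last newline-terminated line is taken into account,
	 * so later writes can replace the sub-device definition.
	 */
	const char *line = "dev(0000:02:00.0)\n";
	size_t len = strlen(line);

	return write(pipe_wr, line, len) == (ssize_t)len ? 0 : -1;
}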
diff --git a/drivers/net/failsafe/failsafe_eal.c b/drivers/net/failsafe/failsafe_eal.c
index 19d26f53..c3d67312 100644
--- a/drivers/net/failsafe/failsafe_eal.c
+++ b/drivers/net/failsafe/failsafe_eal.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#include <rte_malloc.h>
@@ -36,39 +8,93 @@
#include "failsafe_private.h"
static int
+fs_ethdev_portid_get(const char *name, uint16_t *port_id)
+{
+ uint16_t pid;
+ size_t len;
+
+ if (name == NULL) {
+ DEBUG("Null pointer is specified\n");
+ return -EINVAL;
+ }
+ len = strlen(name);
+ RTE_ETH_FOREACH_DEV(pid) {
+ if (!strncmp(name, rte_eth_devices[pid].device->name, len)) {
+ *port_id = pid;
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static int
fs_bus_init(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
struct rte_devargs *da;
uint8_t i;
- uint16_t j;
+ uint16_t pid;
int ret;
FOREACH_SUBDEV(sdev, i, dev) {
if (sdev->state != DEV_PARSED)
continue;
da = &sdev->devargs;
- ret = rte_eal_hotplug_add(da->bus->name,
- da->name,
- da->args);
- if (ret) {
- ERROR("sub_device %d probe failed %s%s%s", i,
- rte_errno ? "(" : "",
- rte_errno ? strerror(rte_errno) : "",
- rte_errno ? ")" : "");
- continue;
- }
- RTE_ETH_FOREACH_DEV(j) {
- if (strcmp(rte_eth_devices[j].device->name,
- da->name) == 0) {
- ETH(sdev) = &rte_eth_devices[j];
- break;
+ if (fs_ethdev_portid_get(da->name, &pid) != 0) {
+ ret = rte_eal_hotplug_add(da->bus->name,
+ da->name,
+ da->args);
+ if (ret) {
+ ERROR("sub_device %d probe failed %s%s%s", i,
+ rte_errno ? "(" : "",
+ rte_errno ? strerror(rte_errno) : "",
+ rte_errno ? ")" : "");
+ continue;
}
+ if (fs_ethdev_portid_get(da->name, &pid) != 0) {
+ ERROR("sub_device %d init went wrong", i);
+ return -ENODEV;
+ }
+ } else {
+ char devstr[DEVARGS_MAXLEN] = "";
+ struct rte_devargs *probed_da =
+ rte_eth_devices[pid].device->devargs;
+
+ /* Take control of device probed by EAL options. */
+ free(da->args);
+ memset(da, 0, sizeof(*da));
+ if (probed_da != NULL)
+ snprintf(devstr, sizeof(devstr), "%s,%s",
+ probed_da->name, probed_da->args);
+ else
+ snprintf(devstr, sizeof(devstr), "%s",
+ rte_eth_devices[pid].device->name);
+ ret = rte_eal_devargs_parse(devstr, da);
+ if (ret) {
+ ERROR("Probed devargs parsing failed with code"
+ " %d", ret);
+ return ret;
+ }
+ INFO("Taking control of a probed sub device"
+ " %d named %s", i, da->name);
}
- if (ETH(sdev) == NULL) {
- ERROR("sub_device %d init went wrong", i);
- return -ENODEV;
+ ret = rte_eth_dev_owner_set(pid, &PRIV(dev)->my_owner);
+ if (ret < 0) {
+ INFO("sub_device %d owner set failed (%s),"
+ " will try again later", i, strerror(-ret));
+ continue;
+ } else if (strncmp(rte_eth_devices[pid].device->name, da->name,
+ strlen(da->name)) != 0) {
+ /*
+ * The device probably was removed and its port id was
+ * reallocated before ownership set.
+ */
+ rte_eth_dev_owner_unset(pid, PRIV(dev)->my_owner.id);
+ INFO("sub_device %d was probably removed before taking"
+ " ownership, will try again later", i);
+ continue;
}
+ ETH(sdev) = &rte_eth_devices[pid];
SUB_ID(sdev) = i;
sdev->fs_dev = dev;
sdev->dev = ETH(sdev)->device;
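
Note on the fs_bus_init() rework above: the fail-safe PMD now claims each sub-device through the port-ownership API (experimental in 18.02, hence the new ALLOW_EXPERIMENTAL_API define in the Makefile), so nothing else — including the application iterating over ports — grabs a device it manages. A minimal sketch of the claim sequence (the owner name is illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

static int
claim_port(uint16_t port_id)
{
	struct rte_eth_dev_owner owner;
	int ret;

	/* Get a unique owner id, label it, then take the port. */
	ret = rte_eth_dev_owner_new(&owner.id);
	if (ret)
		return ret;
	snprintf(owner.name, sizeof(owner.name), "my-app");
	return rte_eth_dev_owner_set(port_id, &owner);
}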
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 21392e5a..2c0bf936 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#include <unistd.h>
@@ -283,6 +255,7 @@ fs_dev_remove(struct sub_device *sdev)
return;
switch (sdev->state) {
case DEV_STARTED:
+ failsafe_rx_intr_uninstall_subdevice(sdev);
rte_eth_dev_stop(PORT_ID(sdev));
sdev->state = DEV_ACTIVE;
/* fallthrough */
@@ -297,7 +270,7 @@ fs_dev_remove(struct sub_device *sdev)
ERROR("Bus detach failed for sub_device %u",
SUB_ID(sdev));
} else {
- ETH(sdev)->state = RTE_ETH_DEV_UNUSED;
+ rte_eth_dev_release_port(ETH(sdev));
}
sdev->state = DEV_PARSED;
/* fallthrough */
@@ -307,6 +280,7 @@ fs_dev_remove(struct sub_device *sdev)
/* the end */
break;
}
+ sdev->remove = 0;
failsafe_hotplug_alarm_install(sdev->fs_dev);
}
@@ -354,8 +328,11 @@ failsafe_dev_remove(struct rte_eth_dev *dev)
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
if (sdev->remove && fs_rxtx_clean(sdev)) {
+ if (fs_lock(dev, 1) != 0)
+ return;
fs_dev_stats_save(sdev);
fs_dev_remove(sdev);
+ fs_unlock(dev, 1);
}
}
@@ -455,6 +432,7 @@ failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
{
struct sub_device *sdev = cb_arg;
+ fs_lock(sdev->fs_dev, 0);
/* Switch as soon as possible tx_dev. */
fs_switch_dev(sdev->fs_dev, sdev);
/* Use safe bursts in any case. */
@@ -464,6 +442,7 @@ failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
* the callback at the source of the current thread context.
*/
sdev->remove = 1;
+ fs_unlock(sdev->fs_dev, 0);
return 0;
}
@@ -480,7 +459,7 @@ failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused,
if (ret)
return _rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
else
return 0;
}
diff --git a/drivers/net/failsafe/failsafe_flow.c b/drivers/net/failsafe/failsafe_flow.c
index 153ceeed..ec8c909b 100644
--- a/drivers/net/failsafe/failsafe_flow.c
+++ b/drivers/net/failsafe/failsafe_flow.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#include <sys/queue.h>
@@ -83,16 +55,19 @@ fs_flow_validate(struct rte_eth_dev *dev,
uint8_t i;
int ret;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Calling rte_flow_validate on sub_device %d", i);
ret = rte_flow_validate(PORT_ID(sdev),
attr, patterns, actions, error);
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("Operation rte_flow_validate failed for sub_device %d"
" with error %d", i, ret);
+ fs_unlock(dev, 0);
return ret;
}
}
+ fs_unlock(dev, 0);
return 0;
}
@@ -107,17 +82,19 @@ fs_flow_create(struct rte_eth_dev *dev,
struct rte_flow *flow;
uint8_t i;
+ fs_lock(dev, 0);
flow = fs_flow_allocate(attr, patterns, actions);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
flow->flows[i] = rte_flow_create(PORT_ID(sdev),
attr, patterns, actions, error);
- if (flow->flows[i] == NULL) {
+ if (flow->flows[i] == NULL && fs_err(sdev, -rte_errno)) {
ERROR("Failed to create flow on sub_device %d",
i);
goto err;
}
}
TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
+ fs_unlock(dev, 0);
return flow;
err:
FOREACH_SUBDEV(sdev, i, dev) {
@@ -126,6 +103,7 @@ err:
flow->flows[i], error);
}
fs_flow_release(&flow);
+ fs_unlock(dev, 0);
return NULL;
}
@@ -143,6 +121,7 @@ fs_flow_destroy(struct rte_eth_dev *dev,
return -EINVAL;
}
ret = 0;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
int local_ret;
@@ -150,7 +129,7 @@ fs_flow_destroy(struct rte_eth_dev *dev,
continue;
local_ret = rte_flow_destroy(PORT_ID(sdev),
flow->flows[i], error);
- if (local_ret) {
+ if ((local_ret = fs_err(sdev, local_ret))) {
ERROR("Failed to destroy flow on sub_device %d: %d",
i, local_ret);
if (ret == 0)
@@ -159,6 +138,7 @@ fs_flow_destroy(struct rte_eth_dev *dev,
}
TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
fs_flow_release(&flow);
+ fs_unlock(dev, 0);
return ret;
}
@@ -172,12 +152,14 @@ fs_flow_flush(struct rte_eth_dev *dev,
uint8_t i;
int ret;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Calling rte_flow_flush on sub_device %d", i);
ret = rte_flow_flush(PORT_ID(sdev), error);
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("Operation rte_flow_flush failed for sub_device %d"
" with error %d", i, ret);
+ fs_unlock(dev, 0);
return ret;
}
}
@@ -185,6 +167,7 @@ fs_flow_flush(struct rte_eth_dev *dev,
TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
fs_flow_release(&flow);
}
+ fs_unlock(dev, 0);
return 0;
}
@@ -197,11 +180,19 @@ fs_flow_query(struct rte_eth_dev *dev,
{
struct sub_device *sdev;
+ fs_lock(dev, 0);
sdev = TX_SUBDEV(dev);
if (sdev != NULL) {
- return rte_flow_query(PORT_ID(sdev),
- flow->flows[SUB_ID(sdev)], type, arg, error);
+ int ret = rte_flow_query(PORT_ID(sdev),
+ flow->flows[SUB_ID(sdev)],
+ type, arg, error);
+
+ if ((ret = fs_err(sdev, ret))) {
+ fs_unlock(dev, 0);
+ return ret;
+ }
}
+ fs_unlock(dev, 0);
WARN("No active sub_device to query about its flow");
return -1;
}
@@ -215,6 +206,7 @@ fs_flow_isolate(struct rte_eth_dev *dev,
uint8_t i;
int ret;
+ fs_lock(dev, 0);
FOREACH_SUBDEV(sdev, i, dev) {
if (sdev->state < DEV_PROBED)
continue;
@@ -223,14 +215,16 @@ fs_flow_isolate(struct rte_eth_dev *dev,
WARN("flow isolation mode of sub_device %d in incoherent state.",
i);
ret = rte_flow_isolate(PORT_ID(sdev), set, error);
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("Operation rte_flow_isolate failed for sub_device %d"
" with error %d", i, ret);
+ fs_unlock(dev, 0);
return ret;
}
sdev->flow_isolated = set;
}
PRIV(dev)->flow_isolated = set;
+ fs_unlock(dev, 0);
return 0;
}
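
Every flow op above follows the same fan-out: take the hot-plug lock, replay the rte_flow call on each active subdevice, and let fs_err() absorb failures caused by an unplugged device. From the application side nothing changes; a single call on the fail-safe port is transparently mirrored. A minimal usage sketch, assuming fs_port holds the fail-safe port id and attr/pattern/actions are built elsewhere:

    struct rte_flow_error ferr;
    struct rte_flow *flow;

    /* One call on the fail-safe port; fs_flow_create() replicates it
     * on every active subdevice and keeps the per-subdevice handles. */
    flow = rte_flow_create(fs_port, &attr, pattern, actions, &ferr);
    if (flow == NULL)
            printf("flow creation failed: %s\n",
                   ferr.message ? ferr.message : rte_strerror(rte_errno));
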
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
new file mode 100644
index 00000000..6b7f9c1a
--- /dev/null
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -0,0 +1,536 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd.
+ */
+
+/**
+ * @file
+ * Interrupts handling for failsafe driver.
+ */
+
+#if defined(LINUX)
+#include <sys/epoll.h>
+#endif
+#include <unistd.h>
+
+#include <rte_alarm.h>
+#include <rte_config.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_interrupts.h>
+#include <rte_io.h>
+#include <rte_service_component.h>
+
+#include "failsafe_private.h"
+
+#define NUM_RX_PROXIES (FAILSAFE_MAX_ETHPORTS * RTE_MAX_RXTX_INTR_VEC_ID)
+
+/**
+ * Open an epoll file descriptor.
+ *
+ * @param flags
+ * Flags for defining epoll behavior.
+ * @return
+ * 0 on success, negative errno value otherwise.
+ */
+static int
+fs_epoll_create1(int flags)
+{
+#if defined(LINUX)
+ return epoll_create1(flags);
+#elif defined(BSD)
+ RTE_SET_USED(flags);
+ return -ENOTSUP;
+#endif
+}
+
+/**
+ * Rx event proxy service routine.
+ * Waits for Rx events coming from the subdevices and proxies each of
+ * them to the event file descriptor of the matching failsafe Rx queue.
+ *
+ * @param data
+ *   Pointer to failsafe private structure.
+ * @return
+ * 0 on success, negative errno value otherwise.
+ */
+static int
+fs_rx_event_proxy_routine(void *data)
+{
+ struct fs_priv *priv;
+ struct rxq *rxq;
+ struct rte_epoll_event *events;
+ uint64_t u64;
+ int i, n;
+ int rc = 0;
+
+ u64 = 1;
+ priv = data;
+ events = priv->rxp.evec;
+ n = rte_epoll_wait(priv->rxp.efd, events, NUM_RX_PROXIES, -1);
+ for (i = 0; i < n; i++) {
+ rxq = events[i].epdata.data;
+ if (rxq->enable_events && rxq->event_fd != -1) {
+ if (write(rxq->event_fd, &u64, sizeof(u64)) !=
+ sizeof(u64)) {
+ ERROR("Failed to proxy Rx event to socket %d",
+ rxq->event_fd);
+ rc = -EIO;
+ }
+ }
+ }
+ return rc;
+}
+
+/**
+ * Uninstall failsafe Rx event proxy service.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ */
+static void
+fs_rx_event_proxy_service_uninstall(struct fs_priv *priv)
+{
+ /* Unregister the event service. */
+ switch (priv->rxp.sstate) {
+ case SS_RUNNING:
+ rte_service_map_lcore_set(priv->rxp.sid, priv->rxp.scid, 0);
+ /* fall through */
+ case SS_READY:
+ rte_service_runstate_set(priv->rxp.sid, 0);
+ rte_service_set_stats_enable(priv->rxp.sid, 0);
+ rte_service_component_runstate_set(priv->rxp.sid, 0);
+ /* fall through */
+ case SS_REGISTERED:
+ rte_service_component_unregister(priv->rxp.sid);
+ /* fall through */
+ default:
+ break;
+ }
+}
+
+/**
+ * Install the failsafe Rx event proxy service.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ * @return
+ * 0 on success, negative errno value otherwise.
+ */
+static int
+fs_rx_event_proxy_service_install(struct fs_priv *priv)
+{
+ struct rte_service_spec service;
+ int32_t num_service_cores;
+ int ret = 0;
+
+ num_service_cores = rte_service_lcore_count();
+ if (num_service_cores <= 0) {
+ ERROR("Failed to install Rx interrupts, "
+ "no service core found");
+ return -ENOTSUP;
+ }
+ /* prepare service info */
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ snprintf(service.name, sizeof(service.name), "%s_Rx_service",
+ priv->dev->data->name);
+ service.socket_id = priv->dev->data->numa_node;
+ service.callback = fs_rx_event_proxy_routine;
+ service.callback_userdata = priv;
+
+ if (priv->rxp.sstate == SS_NO_SERVICE) {
+ uint32_t service_core_list[num_service_cores];
+
+ /* get a service core to work with */
+ ret = rte_service_lcore_list(service_core_list,
+ num_service_cores);
+ if (ret <= 0) {
+ ERROR("Failed to install Rx interrupts, "
+ "service core list empty or corrupted");
+ return -ENOTSUP;
+ }
+ priv->rxp.scid = service_core_list[0];
+ ret = rte_service_lcore_add(priv->rxp.scid);
+ if (ret && ret != -EALREADY) {
+ ERROR("Failed adding service core");
+ return ret;
+ }
+ /* service core may be in "stopped" state, start it */
+ ret = rte_service_lcore_start(priv->rxp.scid);
+ if (ret && (ret != -EALREADY)) {
+ ERROR("Failed to install Rx interrupts, "
+ "service core not started");
+ return ret;
+ }
+ /* register our service */
+ ret = rte_service_component_register(&service,
+ &priv->rxp.sid);
+ if (ret) {
+ ERROR("service register() failed");
+ return -ENOEXEC;
+ }
+ priv->rxp.sstate = SS_REGISTERED;
+ /* run the service */
+ ret = rte_service_component_runstate_set(priv->rxp.sid, 1);
+ if (ret < 0) {
+ ERROR("Failed Setting component runstate\n");
+ return ret;
+ }
+ ret = rte_service_set_stats_enable(priv->rxp.sid, 1);
+ if (ret < 0) {
+ ERROR("Failed enabling stats\n");
+ return ret;
+ }
+ ret = rte_service_runstate_set(priv->rxp.sid, 1);
+ if (ret < 0) {
+ ERROR("Failed to run service\n");
+ return ret;
+ }
+ priv->rxp.sstate = SS_READY;
+ /* map the service with the service core */
+ ret = rte_service_map_lcore_set(priv->rxp.sid,
+ priv->rxp.scid, 1);
+ if (ret) {
+ ERROR("Failed to install Rx interrupts, "
+ "could not map service core");
+ return ret;
+ }
+ priv->rxp.sstate = SS_RUNNING;
+ }
+ return 0;
+}
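
fs_rx_event_proxy_service_install() bails out unless at least one service lcore exists, so Rx interrupts on a fail-safe port require reserving a service core at EAL initialization. A hypothetical testpmd invocation; the core layout and PCI address are placeholders. Core 3 is taken out of the application set by -s and runs the proxy service:

    testpmd -l 0-3 -s 0x8 --vdev 'net_failsafe0,dev(0000:84:00.0)' -- -i
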
+
+/**
+ * Install failsafe Rx event proxy subsystem.
+ * This is the way the failsafe PMD generates Rx events on behalf of its
+ * subdevices.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+fs_rx_event_proxy_install(struct fs_priv *priv)
+{
+ int rc = 0;
+
+ /*
+ * Create the epoll fd and event vector for the proxy service to
+ * wait on for Rx events generated by the subdevices.
+ */
+ priv->rxp.efd = fs_epoll_create1(0);
+ if (priv->rxp.efd < 0) {
+ rte_errno = errno;
+ ERROR("Failed to create epoll,"
+ " Rx interrupts will not be supported");
+ return -rte_errno;
+ }
+ priv->rxp.evec = calloc(NUM_RX_PROXIES, sizeof(*priv->rxp.evec));
+ if (priv->rxp.evec == NULL) {
+ ERROR("Failed to allocate memory for event vectors,"
+ " Rx interrupts will not be supported");
+ rc = -ENOMEM;
+ goto error;
+ }
+ rc = fs_rx_event_proxy_service_install(priv);
+ if (rc < 0)
+ goto error;
+ return 0;
+error:
+ if (priv->rxp.efd >= 0) {
+ close(priv->rxp.efd);
+ priv->rxp.efd = -1;
+ }
+ if (priv->rxp.evec != NULL) {
+ free(priv->rxp.evec);
+ priv->rxp.evec = NULL;
+ }
+ rte_errno = -rc;
+ return rc;
+}
+
+/**
+ * Rx interrupt control per subdevice.
+ *
+ * @param sdev
+ *   Pointer to sub-device structure.
+ * @param op
+ *   The operation to be performed for the vector.
+ * Operation type of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+failsafe_eth_rx_intr_ctl_subdevice(struct sub_device *sdev, int op)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev *fsdev;
+ int epfd;
+ uint16_t pid;
+ uint16_t qid;
+ struct rxq *fsrxq;
+ int rc;
+ int ret = 0;
+
+ if (sdev == NULL || (ETH(sdev) == NULL) ||
+ sdev->fs_dev == NULL || (PRIV(sdev->fs_dev) == NULL)) {
+ ERROR("Called with invalid arguments");
+ return -EINVAL;
+ }
+ dev = ETH(sdev);
+ fsdev = sdev->fs_dev;
+ epfd = PRIV(sdev->fs_dev)->rxp.efd;
+ pid = PORT_ID(sdev);
+
+ if (epfd <= 0) {
+ if (op == RTE_INTR_EVENT_ADD) {
+ ERROR("Proxy events are not initialized");
+ return -EBADF;
+ } else {
+ return 0;
+ }
+ }
+ if (dev->data->nb_rx_queues > fsdev->data->nb_rx_queues) {
+ ERROR("subdevice has too many queues,"
+ " Interrupts will not be enabled");
+ return -E2BIG;
+ }
+ for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
+ fsrxq = fsdev->data->rx_queues[qid];
+ rc = rte_eth_dev_rx_intr_ctl_q(pid, qid, epfd,
+ op, (void *)fsrxq);
+ if (rc) {
+ ERROR("rte_eth_dev_rx_intr_ctl_q failed for "
+ "port %d queue %d, epfd %d, error %d",
+ pid, qid, epfd, rc);
+ ret = rc;
+ }
+ }
+ return ret;
+}
+
+/**
+ * Install Rx interrupts subsystem for a subdevice.
+ * Supports dynamic addition of subdevices.
+ *
+ * @param sdev
+ * Pointer to subdevice structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int failsafe_rx_intr_install_subdevice(struct sub_device *sdev)
+{
+ int rc;
+ int qid;
+ struct rte_eth_dev *fsdev;
+ struct rxq **rxq;
+ const struct rte_intr_conf *const intr_conf =
+ &ETH(sdev)->data->dev_conf.intr_conf;
+
+ fsdev = sdev->fs_dev;
+ rxq = (struct rxq **)fsdev->data->rx_queues;
+ if (intr_conf->rxq == 0)
+ return 0;
+ rc = failsafe_eth_rx_intr_ctl_subdevice(sdev, RTE_INTR_EVENT_ADD);
+ if (rc)
+ return rc;
+ /* enable interrupts on already-enabled queues */
+ for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {
+ if (rxq[qid]->enable_events) {
+ int ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev),
+ qid);
+ if (ret && (ret != -ENOTSUP)) {
+ ERROR("Failed to enable interrupts on "
+ "port %d queue %d", PORT_ID(sdev), qid);
+ rc = ret;
+ }
+ }
+ }
+ return rc;
+}
+
+/**
+ * Uninstall Rx interrupts subsystem for a subdevice.
+ * Supports dynamic removal of subdevices.
+ *
+ * @param sdev
+ *   Pointer to subdevice structure.
+ */
+void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev)
+{
+ int qid;
+ struct rte_eth_dev *fsdev;
+ struct rxq *fsrxq;
+
+ fsdev = sdev->fs_dev;
+ for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {
+ if (qid < fsdev->data->nb_rx_queues) {
+ fsrxq = fsdev->data->rx_queues[qid];
+ if (fsrxq->enable_events)
+ rte_eth_dev_rx_intr_disable(PORT_ID(sdev),
+ qid);
+ }
+ }
+ failsafe_eth_rx_intr_ctl_subdevice(sdev, RTE_INTR_EVENT_DEL);
+}
+
+/**
+ * Uninstall failsafe Rx event proxy.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ */
+static void
+fs_rx_event_proxy_uninstall(struct fs_priv *priv)
+{
+ fs_rx_event_proxy_service_uninstall(priv);
+ if (priv->rxp.evec != NULL) {
+ free(priv->rxp.evec);
+ priv->rxp.evec = NULL;
+ }
+ if (priv->rxp.efd > 0) {
+ close(priv->rxp.efd);
+ priv->rxp.efd = -1;
+ }
+}
+
+/**
+ * Uninstall failsafe interrupt vector.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ */
+static void
+fs_rx_intr_vec_uninstall(struct fs_priv *priv)
+{
+ struct rte_intr_handle *intr_handle;
+
+ intr_handle = &priv->intr_handle;
+ if (intr_handle->intr_vec != NULL) {
+ free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+ intr_handle->nb_efd = 0;
+}
+
+/**
+ * Installs failsafe interrupt vector to be registered with EAL later on.
+ *
+ * @param priv
+ * Pointer to failsafe private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+fs_rx_intr_vec_install(struct fs_priv *priv)
+{
+ unsigned int i;
+ unsigned int rxqs_n;
+ unsigned int n;
+ unsigned int count;
+ struct rte_intr_handle *intr_handle;
+
+ rxqs_n = priv->dev->data->nb_rx_queues;
+ n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ count = 0;
+ intr_handle = &priv->intr_handle;
+ RTE_ASSERT(intr_handle->intr_vec == NULL);
+ /* Allocate the interrupt vector of the failsafe Rx proxy interrupts */
+ intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+ if (intr_handle->intr_vec == NULL) {
+ fs_rx_intr_vec_uninstall(priv);
+ rte_errno = ENOMEM;
+ ERROR("Failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -rte_errno;
+ }
+ for (i = 0; i < n; i++) {
+ struct rxq *rxq = priv->dev->data->rx_queues[i];
+
+ /* Skip queues that cannot request interrupts. */
+ if (rxq == NULL || rxq->event_fd < 0) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+ rte_errno = E2BIG;
+ ERROR("Too many Rx queues for interrupt vector size"
+ " (%d), Rx interrupts cannot be enabled",
+ RTE_MAX_RXTX_INTR_VEC_ID);
+ fs_rx_intr_vec_uninstall(priv);
+ return -rte_errno;
+ }
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = rxq->event_fd;
+ count++;
+ }
+ if (count == 0) {
+ fs_rx_intr_vec_uninstall(priv);
+ } else {
+ intr_handle->nb_efd = count;
+ intr_handle->efd_counter_size = sizeof(uint64_t);
+ }
+ return 0;
+}
+
+/**
+ * Uninstall failsafe Rx interrupts subsystem.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+void
+failsafe_rx_intr_uninstall(struct rte_eth_dev *dev)
+{
+ struct fs_priv *priv;
+ struct rte_intr_handle *intr_handle;
+
+ priv = PRIV(dev);
+ intr_handle = &priv->intr_handle;
+ rte_intr_free_epoll_fd(intr_handle);
+ fs_rx_event_proxy_uninstall(priv);
+ fs_rx_intr_vec_uninstall(priv);
+ dev->intr_handle = NULL;
+}
+
+/**
+ * Install failsafe Rx interrupts subsystem.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+failsafe_rx_intr_install(struct rte_eth_dev *dev)
+{
+ struct fs_priv *priv = PRIV(dev);
+ const struct rte_intr_conf *const intr_conf =
+ &priv->dev->data->dev_conf.intr_conf;
+
+ if (intr_conf->rxq == 0 || dev->intr_handle != NULL)
+ return 0;
+ if (fs_rx_intr_vec_install(priv) < 0)
+ return -rte_errno;
+ if (fs_rx_event_proxy_install(priv) < 0) {
+ fs_rx_intr_vec_uninstall(priv);
+ return -rte_errno;
+ }
+ dev->intr_handle = &priv->intr_handle;
+ return 0;
+}
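
With the proxy in place, the fail-safe port behaves like any interrupt-capable port: the application registers the queue's event fd with an epoll instance and sleeps on it, while the service routine above forwards subdevice events into that fd. A minimal sketch of the standard DPDK Rx interrupt loop, assuming the port was configured with intr_conf.rxq = 1 and using placeholder port/queue ids:

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>

    struct rte_epoll_event ev;
    uint16_t port = 0, queue = 0;   /* placeholders */

    /* Register the queue's event fd with the per-thread epoll fd. */
    rte_eth_dev_rx_intr_ctl_q(port, queue, RTE_EPOLL_PER_THREAD,
                              RTE_INTR_EVENT_ADD, NULL);
    for (;;) {
            rte_eth_dev_rx_intr_enable(port, queue);
            /* Sleep until the proxy service writes the event fd. */
            rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
            rte_eth_dev_rx_intr_disable(port, queue);
            /* Drain with rte_eth_rx_burst() until empty, then loop. */
    }
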
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index e16a5903..057e435c 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -1,41 +1,15 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
+#include <stdbool.h>
#include <stdint.h>
+#include <unistd.h>
#include <rte_debug.h>
#include <rte_atomic.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
@@ -71,12 +45,43 @@ static struct rte_eth_dev_info default_infos = {
*/
.rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_MACSEC_STRIP |
+ DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_TIMESTAMP |
+ DEV_RX_OFFLOAD_SECURITY,
+ .rx_queue_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO,
- .tx_offload_capa = 0x0,
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_MACSEC_STRIP |
+ DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_TIMESTAMP |
+ DEV_RX_OFFLOAD_SECURITY,
+ .tx_offload_capa =
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM,
.flow_type_rss_offloads = 0x0,
};
@@ -84,15 +89,29 @@ static int
fs_dev_configure(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
+ uint64_t supp_tx_offloads;
+ uint64_t tx_offloads;
uint8_t i;
int ret;
+ fs_lock(dev, 0);
+ supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+ if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+ rte_errno = ENOTSUP;
+ ERROR("Some Tx offloads are not supported, "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ tx_offloads, supp_tx_offloads);
+ fs_unlock(dev, 0);
+ return -rte_errno;
+ }
FOREACH_SUBDEV(sdev, i, dev) {
int rmv_interrupt = 0;
int lsc_interrupt = 0;
int lsc_enabled;
- if (sdev->state != DEV_PROBED)
+ if (sdev->state != DEV_PROBED &&
+ !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
continue;
rmv_interrupt = ETH(sdev)->data->dev_flags &
@@ -115,13 +134,15 @@ fs_dev_configure(struct rte_eth_dev *dev)
dev->data->dev_conf.intr_conf.lsc = 0;
}
DEBUG("Configuring sub-device %d", i);
- sdev->remove = 0;
ret = rte_eth_dev_configure(PORT_ID(sdev),
dev->data->nb_rx_queues,
dev->data->nb_tx_queues,
&dev->data->dev_conf);
if (ret) {
+ if (!fs_err(sdev, ret))
+ continue;
ERROR("Could not configure sub_device %d", i);
+ fs_unlock(dev, 0);
return ret;
}
if (rmv_interrupt) {
@@ -148,6 +169,7 @@ fs_dev_configure(struct rte_eth_dev *dev)
}
if (PRIV(dev)->state < DEV_ACTIVE)
PRIV(dev)->state = DEV_ACTIVE;
+ fs_unlock(dev, 0);
return 0;
}
@@ -158,18 +180,37 @@ fs_dev_start(struct rte_eth_dev *dev)
uint8_t i;
int ret;
+ fs_lock(dev, 0);
+ ret = failsafe_rx_intr_install(dev);
+ if (ret) {
+ fs_unlock(dev, 0);
+ return ret;
+ }
FOREACH_SUBDEV(sdev, i, dev) {
if (sdev->state != DEV_ACTIVE)
continue;
DEBUG("Starting sub_device %d", i);
ret = rte_eth_dev_start(PORT_ID(sdev));
- if (ret)
+ if (ret) {
+ if (!fs_err(sdev, ret))
+ continue;
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ ret = failsafe_rx_intr_install_subdevice(sdev);
+ if (ret) {
+ if (!fs_err(sdev, ret))
+ continue;
+ rte_eth_dev_stop(PORT_ID(sdev));
+ fs_unlock(dev, 0);
return ret;
+ }
sdev->state = DEV_STARTED;
}
if (PRIV(dev)->state < DEV_STARTED)
PRIV(dev)->state = DEV_STARTED;
fs_switch_dev(dev, NULL);
+ fs_unlock(dev, 0);
return 0;
}
@@ -179,11 +220,15 @@ fs_dev_stop(struct rte_eth_dev *dev)
struct sub_device *sdev;
uint8_t i;
+ fs_lock(dev, 0);
PRIV(dev)->state = DEV_STARTED - 1;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
rte_eth_dev_stop(PORT_ID(sdev));
+ failsafe_rx_intr_uninstall_subdevice(sdev);
sdev->state = DEV_STARTED - 1;
}
+ failsafe_rx_intr_uninstall(dev);
+ fs_unlock(dev, 0);
}
static int
@@ -193,15 +238,18 @@ fs_dev_set_link_up(struct rte_eth_dev *dev)
uint8_t i;
int ret;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
" with error %d", i, ret);
+ fs_unlock(dev, 0);
return ret;
}
}
+ fs_unlock(dev, 0);
return 0;
}
@@ -212,15 +260,18 @@ fs_dev_set_link_down(struct rte_eth_dev *dev)
uint8_t i;
int ret;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
" with error %d", i, ret);
+ fs_unlock(dev, 0);
return ret;
}
}
+ fs_unlock(dev, 0);
return 0;
}
@@ -231,6 +282,7 @@ fs_dev_close(struct rte_eth_dev *dev)
struct sub_device *sdev;
uint8_t i;
+ fs_lock(dev, 0);
failsafe_hotplug_alarm_cancel(dev);
if (PRIV(dev)->state == DEV_STARTED)
dev->dev_ops->dev_stop(dev);
@@ -241,6 +293,26 @@ fs_dev_close(struct rte_eth_dev *dev)
sdev->state = DEV_ACTIVE - 1;
}
fs_dev_free_queues(dev);
+ fs_unlock(dev, 0);
+}
+
+static bool
+fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
+{
+ uint64_t port_offloads;
+ uint64_t queue_supp_offloads;
+ uint64_t port_supp_offloads;
+
+ port_offloads = dev->data->dev_conf.rxmode.offloads;
+ queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
+ port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
+ if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
+ offloads)
+ return false;
+ /* Verify we have no conflict with port offloads */
+ if ((port_offloads ^ offloads) & port_supp_offloads)
+ return false;
+ return true;
}
static void
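
The XOR test above is the subtle part of fs_rxq_offloads_valid(): a per-queue request must lie within the union of queue and port capabilities, and on every bit the port is capable of it must agree with the port-wide configuration; only purely queue-level offloads may vary per queue. A worked illustration with made-up capability values:

    uint64_t port_supp  = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_TCP_LRO;
    uint64_t queue_supp = DEV_RX_OFFLOAD_VLAN_STRIP;
    uint64_t port_conf  = DEV_RX_OFFLOAD_VLAN_STRIP; /* from dev_configure() */

    /* Accepted: agrees with port_conf on every port-capable bit. */
    uint64_t ok_req  = DEV_RX_OFFLOAD_VLAN_STRIP;
    /* Rejected: TCP_LRO is port-capable but differs from port_conf,
     * so (port_conf ^ bad_req) & port_supp is non-zero. */
    uint64_t bad_req = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_TCP_LRO;
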
@@ -255,11 +327,15 @@ fs_rx_queue_release(void *queue)
return;
rxq = queue;
dev = rxq->priv->dev;
+ fs_lock(dev, 0);
+ if (rxq->event_fd > 0)
+ close(rxq->event_fd);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
SUBOPS(sdev, rx_queue_release)
(ETH(sdev)->data->rx_queues[rxq->qid]);
dev->data->rx_queues[rxq->qid] = NULL;
rte_free(rxq);
+ fs_unlock(dev, 0);
}
static int
@@ -270,22 +346,48 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
+ /*
+ * FIXME: Add a proper interface in rte_eal_interrupts for
+ * allocating eventfd as an interrupt vector.
+ * For the time being, fake as if we are using MSIX interrupts,
+ * this will cause rte_intr_efd_enable to allocate an eventfd for us.
+ */
+ struct rte_intr_handle intr_handle = {
+ .type = RTE_INTR_HANDLE_VFIO_MSIX,
+ .efds = { -1, },
+ };
struct sub_device *sdev;
struct rxq *rxq;
uint8_t i;
int ret;
+ fs_lock(dev, 0);
rxq = dev->data->rx_queues[rx_queue_id];
if (rxq != NULL) {
fs_rx_queue_release(rxq);
dev->data->rx_queues[rx_queue_id] = NULL;
}
+ /* Verify application offloads are valid for our port and queue. */
+ if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
+ rte_errno = ENOTSUP;
+ ERROR("Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ PRIV(dev)->infos.rx_offload_capa |
+ PRIV(dev)->infos.rx_queue_offload_capa);
+ fs_unlock(dev, 0);
+ return -rte_errno;
+ }
rxq = rte_zmalloc(NULL,
sizeof(*rxq) +
sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
RTE_CACHE_LINE_SIZE);
- if (rxq == NULL)
+ if (rxq == NULL) {
+ fs_unlock(dev, 0);
return -ENOMEM;
+ }
FOREACH_SUBDEV(sdev, i, dev)
rte_atomic64_init(&rxq->refcnt[i]);
rxq->qid = rx_queue_id;
@@ -294,23 +396,127 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
rxq->info.conf = *rx_conf;
rxq->info.nb_desc = nb_rx_desc;
rxq->priv = PRIV(dev);
+ rxq->sdev = PRIV(dev)->subs;
+ ret = rte_intr_efd_enable(&intr_handle, 1);
+ if (ret < 0) {
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ rxq->event_fd = intr_handle.efds[0];
dev->data->rx_queues[rx_queue_id] = rxq;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
rx_queue_id,
nb_rx_desc, socket_id,
rx_conf, mb_pool);
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("RX queue setup failed for sub_device %d", i);
goto free_rxq;
}
}
+ fs_unlock(dev, 0);
return 0;
free_rxq:
fs_rx_queue_release(rxq);
+ fs_unlock(dev, 0);
return ret;
}
+static int
+fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct rxq *rxq;
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+ int rc = 0;
+
+ fs_lock(dev, 0);
+ if (idx >= dev->data->nb_rx_queues) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+ rxq = dev->data->rx_queues[idx];
+ if (rxq == NULL || rxq->event_fd <= 0) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+ /* Fail if the proxy service is not running. */
+ if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
+ ERROR("Failsafe Rx event proxy service is not running");
+ rc = -EAGAIN;
+ goto unlock;
+ }
+ rxq->enable_events = 1;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
+ ret = fs_err(sdev, ret);
+ if (ret)
+ rc = ret;
+ }
+unlock:
+ fs_unlock(dev, 0);
+ if (rc)
+ rte_errno = -rc;
+ return rc;
+}
+
+static int
+fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct rxq *rxq;
+ struct sub_device *sdev;
+ uint64_t u64;
+ uint8_t i;
+ int rc = 0;
+ int ret;
+
+ fs_lock(dev, 0);
+ if (idx >= dev->data->nb_rx_queues) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+ rxq = dev->data->rx_queues[idx];
+ if (rxq == NULL || rxq->event_fd <= 0) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+ rxq->enable_events = 0;
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
+ ret = fs_err(sdev, ret);
+ if (ret)
+ rc = ret;
+ }
+ /* Clear pending events */
+ while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
+ ;
+unlock:
+ fs_unlock(dev, 0);
+ if (rc)
+ rte_errno = -rc;
+ return rc;
+}
+
+static bool
+fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
+{
+ uint64_t port_offloads;
+ uint64_t queue_supp_offloads;
+ uint64_t port_supp_offloads;
+
+ port_offloads = dev->data->dev_conf.txmode.offloads;
+ queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
+ port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
+ if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
+ offloads)
+ return false;
+ /* Verify we have no conflict with port offloads */
+ if ((port_offloads ^ offloads) & port_supp_offloads)
+ return false;
+ return true;
+}
+
static void
fs_tx_queue_release(void *queue)
{
@@ -323,11 +529,13 @@ fs_tx_queue_release(void *queue)
return;
txq = queue;
dev = txq->priv->dev;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
SUBOPS(sdev, tx_queue_release)
(ETH(sdev)->data->tx_queues[txq->qid]);
dev->data->tx_queues[txq->qid] = NULL;
rte_free(txq);
+ fs_unlock(dev, 0);
}
static int
@@ -342,17 +550,38 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
uint8_t i;
int ret;
+ fs_lock(dev, 0);
txq = dev->data->tx_queues[tx_queue_id];
if (txq != NULL) {
fs_tx_queue_release(txq);
dev->data->tx_queues[tx_queue_id] = NULL;
}
+ /*
+ * Don't verify queue offloads for applications which
+ * use the old API.
+ */
+ if (tx_conf != NULL &&
+ (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+ fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
+ rte_errno = ENOTSUP;
+ ERROR("Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ PRIV(dev)->infos.tx_offload_capa |
+ PRIV(dev)->infos.tx_queue_offload_capa);
+ fs_unlock(dev, 0);
+ return -rte_errno;
+ }
txq = rte_zmalloc("ethdev TX queue",
sizeof(*txq) +
sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
RTE_CACHE_LINE_SIZE);
- if (txq == NULL)
+ if (txq == NULL) {
+ fs_unlock(dev, 0);
return -ENOMEM;
+ }
FOREACH_SUBDEV(sdev, i, dev)
rte_atomic64_init(&txq->refcnt[i]);
txq->qid = tx_queue_id;
@@ -366,14 +595,16 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
tx_queue_id,
nb_tx_desc, socket_id,
tx_conf);
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("TX queue setup failed for sub_device %d", i);
goto free_txq;
}
}
+ fs_unlock(dev, 0);
return 0;
free_txq:
fs_tx_queue_release(txq);
+ fs_unlock(dev, 0);
return ret;
}
@@ -400,8 +631,10 @@ fs_promiscuous_enable(struct rte_eth_dev *dev)
struct sub_device *sdev;
uint8_t i;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
rte_eth_promiscuous_enable(PORT_ID(sdev));
+ fs_unlock(dev, 0);
}
static void
@@ -410,8 +643,10 @@ fs_promiscuous_disable(struct rte_eth_dev *dev)
struct sub_device *sdev;
uint8_t i;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
rte_eth_promiscuous_disable(PORT_ID(sdev));
+ fs_unlock(dev, 0);
}
static void
@@ -420,8 +655,10 @@ fs_allmulticast_enable(struct rte_eth_dev *dev)
struct sub_device *sdev;
uint8_t i;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
rte_eth_allmulticast_enable(PORT_ID(sdev));
+ fs_unlock(dev, 0);
}
static void
@@ -430,8 +667,10 @@ fs_allmulticast_disable(struct rte_eth_dev *dev)
struct sub_device *sdev;
uint8_t i;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
rte_eth_allmulticast_disable(PORT_ID(sdev));
+ fs_unlock(dev, 0);
}
static int
@@ -442,12 +681,15 @@ fs_link_update(struct rte_eth_dev *dev,
uint8_t i;
int ret;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Calling link_update on sub_device %d", i);
ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
- if (ret && ret != -1) {
+ if (ret && ret != -1 && sdev->remove == 0 &&
+ rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
ERROR("Link update failed for sub_device %d with error %d",
i, ret);
+ fs_unlock(dev, 0);
return ret;
}
}
@@ -459,9 +701,11 @@ fs_link_update(struct rte_eth_dev *dev,
l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
if (memcmp(l1, l2, sizeof(*l1))) {
*l1 = *l2;
+ fs_unlock(dev, 0);
return 0;
}
}
+ fs_unlock(dev, 0);
return -1;
}
@@ -469,25 +713,35 @@ static int
fs_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
{
+ struct rte_eth_stats backup;
struct sub_device *sdev;
uint8_t i;
int ret;
+ fs_lock(dev, 0);
rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
uint64_t *timestamp = &sdev->stats_snapshot.timestamp;
+ rte_memcpy(&backup, snapshot, sizeof(backup));
ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
if (ret) {
+ if (!fs_err(sdev, ret)) {
+ rte_memcpy(snapshot, &backup, sizeof(backup));
+ goto inc;
+ }
ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
i, ret);
*timestamp = 0;
+ fs_unlock(dev, 0);
return ret;
}
*timestamp = rte_rdtsc();
+inc:
failsafe_stats_increment(stats, snapshot);
}
+ fs_unlock(dev, 0);
return 0;
}
@@ -497,11 +751,13 @@ fs_stats_reset(struct rte_eth_dev *dev)
struct sub_device *sdev;
uint8_t i;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
rte_eth_stats_reset(PORT_ID(sdev));
memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
}
memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
+ fs_unlock(dev, 0);
}
/**
@@ -546,19 +802,26 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
rte_memcpy(&PRIV(dev)->infos, &default_infos,
sizeof(default_infos));
} else {
- uint32_t rx_offload_capa;
+ uint64_t rx_offload_capa;
+ uint64_t rxq_offload_capa;
rx_offload_capa = default_infos.rx_offload_capa;
+ rxq_offload_capa = default_infos.rx_queue_offload_capa;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
rte_eth_dev_info_get(PORT_ID(sdev),
&PRIV(dev)->infos);
rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
+ rxq_offload_capa &=
+ PRIV(dev)->infos.rx_queue_offload_capa;
}
sdev = TX_SUBDEV(dev);
rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
+ PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
PRIV(dev)->infos.tx_offload_capa &=
default_infos.tx_offload_capa;
+ PRIV(dev)->infos.tx_queue_offload_capa &=
+ default_infos.tx_queue_offload_capa;
PRIV(dev)->infos.flow_type_rss_offloads &=
default_infos.flow_type_rss_offloads;
}
@@ -570,14 +833,20 @@ fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
struct rte_eth_dev *edev;
+ const uint32_t *ret;
+ fs_lock(dev, 0);
sdev = TX_SUBDEV(dev);
- if (sdev == NULL)
- return NULL;
+ if (sdev == NULL) {
+ ret = NULL;
+ goto unlock;
+ }
edev = ETH(sdev);
/* ENOTSUP: counts as no supported ptypes */
- if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL)
- return NULL;
+ if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
+ ret = NULL;
+ goto unlock;
+ }
/*
* The API does not permit a clean AND of all ptypes;
* it is also incomplete by design, and we do not really care
@@ -585,7 +854,10 @@ fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
* We just return the ptypes of the device of highest
* priority, usually the PREFERRED device.
*/
- return SUBOPS(sdev, dev_supported_ptypes_get)(edev);
+ ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev);
+unlock:
+ fs_unlock(dev, 0);
+ return ret;
}
static int
@@ -595,15 +867,18 @@ fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
uint8_t i;
int ret;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
i, ret);
+ fs_unlock(dev, 0);
return ret;
}
}
+ fs_unlock(dev, 0);
return 0;
}
@@ -614,15 +889,18 @@ fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
uint8_t i;
int ret;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d"
" with error %d", i, ret);
+ fs_unlock(dev, 0);
return ret;
}
}
+ fs_unlock(dev, 0);
return 0;
}
@@ -631,13 +909,22 @@ fs_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf)
{
struct sub_device *sdev;
+ int ret;
+ fs_lock(dev, 0);
sdev = TX_SUBDEV(dev);
- if (sdev == NULL)
- return 0;
- if (SUBOPS(sdev, flow_ctrl_get) == NULL)
- return -ENOTSUP;
- return SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
+ if (sdev == NULL) {
+ ret = 0;
+ goto unlock;
+ }
+ if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
+ ret = -ENOTSUP;
+ goto unlock;
+ }
+ ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
+unlock:
+ fs_unlock(dev, 0);
+ return ret;
}
static int
@@ -648,15 +935,18 @@ fs_flow_ctrl_set(struct rte_eth_dev *dev,
uint8_t i;
int ret;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d"
" with error %d", i, ret);
+ fs_unlock(dev, 0);
return ret;
}
}
+ fs_unlock(dev, 0);
return 0;
}
@@ -666,6 +956,7 @@ fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
struct sub_device *sdev;
uint8_t i;
+ fs_lock(dev, 0);
/* No check: already done within the rte_eth_dev_mac_addr_remove
* call for the fail-safe device.
*/
@@ -673,6 +964,7 @@ fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
&dev->data->mac_addrs[index]);
PRIV(dev)->mac_addr_pool[index] = 0;
+ fs_unlock(dev, 0);
}
static int
@@ -686,11 +978,13 @@ fs_mac_addr_add(struct rte_eth_dev *dev,
uint8_t i;
RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
PRIu8 " with error %d", i, ret);
+ fs_unlock(dev, 0);
return ret;
}
}
@@ -699,6 +993,7 @@ fs_mac_addr_add(struct rte_eth_dev *dev,
PRIV(dev)->nb_mac_addr = index;
}
PRIV(dev)->mac_addr_pool[index] = vmdq;
+ fs_unlock(dev, 0);
return 0;
}
@@ -708,8 +1003,10 @@ fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
struct sub_device *sdev;
uint8_t i;
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
+ fs_unlock(dev, 0);
}
static int
@@ -727,15 +1024,18 @@ fs_filter_ctrl(struct rte_eth_dev *dev,
*(const void **)arg = &fs_flow_ops;
return 0;
}
+ fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Calling rte_eth_dev_filter_ctrl on sub_device %d", i);
ret = rte_eth_dev_filter_ctrl(PORT_ID(sdev), type, op, arg);
- if (ret) {
+ if ((ret = fs_err(sdev, ret))) {
ERROR("Operation rte_eth_dev_filter_ctrl failed for sub_device %d"
" with error %d", i, ret);
+ fs_unlock(dev, 0);
return ret;
}
}
+ fs_unlock(dev, 0);
return 0;
}
@@ -761,6 +1061,8 @@ const struct eth_dev_ops failsafe_ops = {
.tx_queue_setup = fs_tx_queue_setup,
.rx_queue_release = fs_rx_queue_release,
.tx_queue_release = fs_tx_queue_release,
+ .rx_queue_intr_enable = fs_rx_intr_enable,
+ .rx_queue_intr_disable = fs_rx_intr_disable,
.flow_ctrl_get = fs_flow_ctrl_get,
.flow_ctrl_set = fs_flow_ctrl_set,
.mac_addr_remove = fs_mac_addr_remove,
diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h
index d81cc3ca..2d16ba4c 100644
--- a/drivers/net/failsafe/failsafe_private.h
+++ b/drivers/net/failsafe/failsafe_private.h
@@ -1,53 +1,29 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
#define _RTE_ETH_FAILSAFE_PRIVATE_H_
#include <sys/queue.h>
+#include <pthread.h>
#include <rte_atomic.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_devargs.h>
+#include <rte_interrupts.h>
#define FAILSAFE_DRIVER_NAME "Fail-safe PMD"
+#define FAILSAFE_OWNER_NAME "Fail-safe"
#define PMD_FAILSAFE_MAC_KVARG "mac"
#define PMD_FAILSAFE_HOTPLUG_POLL_KVARG "hotplug_poll"
#define PMD_FAILSAFE_PARAM_STRING \
"dev(<ifc>)," \
"exec(<shell command>)," \
+ "fd(<fd number>)," \
"mac=mac_addr," \
"hotplug_poll=u64" \
""
@@ -57,14 +33,37 @@
#define FAILSAFE_MAX_ETHPORTS 2
#define FAILSAFE_MAX_ETHADDR 128
+#define DEVARGS_MAXLEN 4096
+
+enum rxp_service_state {
+ SS_NO_SERVICE = 0,
+ SS_REGISTERED,
+ SS_READY,
+ SS_RUNNING,
+};
+
/* TYPES */
+struct rx_proxy {
+ /* epoll file descriptor */
+ int efd;
+ /* event vector to be used by epoll */
+ struct rte_epoll_event *evec;
+ /* rte service id */
+ uint32_t sid;
+ /* service core id */
+ uint32_t scid;
+ enum rxp_service_state sstate; /* proxy service state */
+};
+
struct rxq {
struct fs_priv *priv;
uint16_t qid;
- /* id of last sub_device polled */
- uint8_t last_polled;
+ /* next sub_device to poll */
+ struct sub_device *sdev;
unsigned int socket_id;
+ int event_fd; /* event fd registered with the EAL for Rx interrupts */
+ unsigned int enable_events:1; /* proxy Rx events to event_fd */
struct rte_eth_rxq_info info;
rte_atomic64_t refcnt[];
};
@@ -100,6 +99,7 @@ struct fs_stats {
struct sub_device {
/* Exhaustive DPDK device description */
+ struct sub_device *next; /* Next in the circular Rx polling chain. */
struct rte_devargs devargs;
struct rte_bus *bus;
struct rte_device *dev;
@@ -111,6 +111,8 @@ struct sub_device {
struct fs_stats stats_snapshot;
/* Some devices are defined by a command line */
char *cmdline;
+ /* Others are retrieved through a file descriptor */
+ char *fd_str;
/* fail-safe device backreference */
struct rte_eth_dev *fs_dev;
/* flag calling for recollection */
@@ -139,6 +141,8 @@ struct fs_priv {
uint32_t mac_addr_pool[FAILSAFE_MAX_ETHADDR];
/* current capabilities */
struct rte_eth_dev_info infos;
+ struct rte_eth_dev_owner my_owner; /* Unique owner. */
+ struct rte_intr_handle intr_handle; /* Port interrupt handle. */
/*
* Fail-safe state machine.
* This level will be tracking state of the EAL and eth
@@ -148,11 +152,31 @@ struct fs_priv {
*/
enum dev_state state;
struct rte_eth_stats stats_accumulator;
+ /*
+ * Rx interrupts/events proxy.
+ * The PMD issues Rx events to the EAL on behalf of its subdevices. It
+ * does so by registering an event fd for each of its queues with the
+ * EAL. A PMD service thread listens to all the Rx events from the
+ * subdevices; when a subdevice issues an Rx event, this service catches
+ * it and triggers an Rx event in the appropriate failsafe Rx queue.
+ */
+ struct rx_proxy rxp;
+ pthread_mutex_t hotplug_mutex;
+ /* Hot-plug mutex is locked by the alarm mechanism. */
+ volatile unsigned int alarm_lock:1;
unsigned int pending_alarm:1; /* An alarm is pending */
/* flow isolation state */
int flow_isolated:1;
};
+/* FAILSAFE_INTR */
+
+int failsafe_rx_intr_install(struct rte_eth_dev *dev);
+void failsafe_rx_intr_uninstall(struct rte_eth_dev *dev);
+int failsafe_rx_intr_install_subdevice(struct sub_device *sdev);
+void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev);
+
/* MISC */
int failsafe_hotplug_alarm_install(struct rte_eth_dev *dev);
@@ -269,13 +293,13 @@ extern int mac_from_arg;
* a: (rte_atomic64_t)
*/
#define FS_ATOMIC_P(a) \
- rte_atomic64_add(&(a), 1)
+ rte_atomic64_set(&(a), 1)
/**
* a: (rte_atomic64_t)
*/
#define FS_ATOMIC_V(a) \
- rte_atomic64_sub(&(a), 1)
+ rte_atomic64_set(&(a), 0)
/**
* s: (struct sub_device *)
@@ -294,6 +318,14 @@ extern int mac_from_arg;
&((struct txq *)((s)->fs_dev->data->tx_queues[i]))->refcnt[(s)->sid] \
)
+#ifdef RTE_EXEC_ENV_BSDAPP
+#define FS_THREADID_TYPE void*
+#define FS_THREADID_FMT "p"
+#else
+#define FS_THREADID_TYPE unsigned long
+#define FS_THREADID_FMT "lu"
+#endif
+
#define LOG__(level, m, ...) \
RTE_LOG(level, PMD, "net_failsafe: " m "%c", __VA_ARGS__)
#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
@@ -327,6 +359,59 @@ fs_find_next(struct rte_eth_dev *dev,
}
/*
+ * Lock hot-plug mutex.
+ * is_alarm means that the caller is, for sure, the hot-plug alarm mechanism.
+ */
+static inline int
+fs_lock(struct rte_eth_dev *dev, unsigned int is_alarm)
+{
+ int ret;
+
+ if (is_alarm) {
+ ret = pthread_mutex_trylock(&PRIV(dev)->hotplug_mutex);
+ if (ret) {
+ DEBUG("Hot-plug mutex lock trying failed(%s), will try"
+ " again later...", strerror(ret));
+ return ret;
+ }
+ PRIV(dev)->alarm_lock = 1;
+ } else {
+ ret = pthread_mutex_lock(&PRIV(dev)->hotplug_mutex);
+ if (ret) {
+ ERROR("Cannot lock mutex(%s)", strerror(ret));
+ return ret;
+ }
+ }
+ DEBUG("Hot-plug mutex was locked by thread %" FS_THREADID_FMT "%s",
+ (FS_THREADID_TYPE)pthread_self(),
+ PRIV(dev)->alarm_lock ? " by the hot-plug alarm" : "");
+ return ret;
+}
+
+/*
+ * Unlock hot-plug mutex.
+ * is_alarm means that the caller is, for sure, the hot-plug alarm mechanism.
+ */
+static inline void
+fs_unlock(struct rte_eth_dev *dev, unsigned int is_alarm)
+{
+ int ret;
+ unsigned int prev_alarm_lock = PRIV(dev)->alarm_lock;
+
+ if (is_alarm) {
+ RTE_ASSERT(PRIV(dev)->alarm_lock == 1);
+ PRIV(dev)->alarm_lock = 0;
+ }
+ ret = pthread_mutex_unlock(&PRIV(dev)->hotplug_mutex);
+ if (ret)
+ ERROR("Cannot unlock hot-plug mutex(%s)", strerror(ret));
+ else
+ DEBUG("Hot-plug mutex was unlocked by thread %" FS_THREADID_FMT "%s",
+ (FS_THREADID_TYPE)pthread_self(),
+ prev_alarm_lock ? " by the hot-plug alarm" : "");
+}
+
+/*
* Switch emitting device.
* If banned is set, banned must not be considered for
* the role of emitting device.
@@ -375,4 +460,15 @@ fs_switch_dev(struct rte_eth_dev *dev,
rte_wmb();
}
+/*
+ * Adjust error value and rte_errno to the fail-safe actual error value.
+ */
+static inline int
+fs_err(struct sub_device *sdev, int err)
+{
+ /* A device removal shouldn't be reported as an error. */
+ if (sdev->remove == 1 || err == -EIO)
+ return rte_errno = 0;
+ return err;
+}
#endif /* _RTE_ETH_FAILSAFE_PRIVATE_H_ */
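
fs_lock(), fs_unlock() and fs_err() define the discipline that nearly every wrapper in failsafe_ops.c and failsafe_flow.c follows: lock, iterate the active subdevices, filter each result through fs_err() so a hot-unplugged device is not reported as a failure, and unlock on every exit path. A condensed sketch of the pattern; fs_example_op() is hypothetical:

    static int
    fs_example_op(struct rte_eth_dev *dev)
    {
            struct sub_device *sdev;
            uint8_t i;
            int ret;

            fs_lock(dev, 0); /* not the alarm: blocks until acquired */
            FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
                    ret = rte_eth_dev_set_mtu(PORT_ID(sdev), 1500);
                    ret = fs_err(sdev, ret); /* removal is not an error */
                    if (ret) {
                            fs_unlock(dev, 0);
                            return ret;
                    }
            }
            fs_unlock(dev, 0);
            return 0;
    }
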
diff --git a/drivers/net/failsafe/failsafe_rxtx.c b/drivers/net/failsafe/failsafe_rxtx.c
index 70157c82..363cf7ba 100644
--- a/drivers/net/failsafe/failsafe_rxtx.c
+++ b/drivers/net/failsafe/failsafe_rxtx.c
@@ -1,40 +1,12 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#include <rte_atomic.h>
#include <rte_debug.h>
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "failsafe_private.h"
@@ -94,36 +66,28 @@ failsafe_rx_burst(void *queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct fs_priv *priv;
struct sub_device *sdev;
struct rxq *rxq;
void *sub_rxq;
uint16_t nb_rx;
- uint8_t nb_polled, nb_subs;
- uint8_t i;
rxq = queue;
- priv = rxq->priv;
- nb_subs = priv->subs_tail - priv->subs_head;
- nb_polled = 0;
- for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
- i++;
- if (i == priv->subs_tail)
- i = priv->subs_head;
- sdev = &priv->subs[i];
- if (unlikely(fs_rx_unsafe(sdev)))
+ sdev = rxq->sdev;
+ do {
+ if (fs_rx_unsafe(sdev)) {
+ nb_rx = 0;
+ sdev = sdev->next;
continue;
+ }
sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
nb_rx = ETH(sdev)->
rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
- if (nb_rx) {
- rxq->last_polled = i;
- return nb_rx;
- }
- }
- return 0;
+ sdev = sdev->next;
+ } while (nb_rx == 0 && sdev != rxq->sdev);
+ rxq->sdev = sdev;
+ return nb_rx;
}
uint16_t
@@ -131,35 +95,24 @@ failsafe_rx_burst_fast(void *queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct fs_priv *priv;
struct sub_device *sdev;
struct rxq *rxq;
void *sub_rxq;
uint16_t nb_rx;
- uint8_t nb_polled, nb_subs;
- uint8_t i;
rxq = queue;
- priv = rxq->priv;
- nb_subs = priv->subs_tail - priv->subs_head;
- nb_polled = 0;
- for (i = rxq->last_polled; nb_polled < nb_subs; nb_polled++) {
- i++;
- if (i == priv->subs_tail)
- i = priv->subs_head;
- sdev = &priv->subs[i];
+ sdev = rxq->sdev;
+ do {
RTE_ASSERT(!fs_rx_unsafe(sdev));
sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
nb_rx = ETH(sdev)->
rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
- if (nb_rx) {
- rxq->last_polled = i;
- return nb_rx;
- }
- }
- return 0;
+ sdev = sdev->next;
+ } while (nb_rx == 0 && sdev != rxq->sdev);
+ rxq->sdev = sdev;
+ return nb_rx;
}
uint16_t
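
The Rx burst rewrite above replaces per-call index arithmetic with a pre-linked circular list: rxq->sdev remembers where the previous burst stopped, and ->next is followed until packets are found or a full turn completes, so the hot path carries no wrap-around computation. The code maintaining the ring is outside these hunks; a sketch of what such maintenance could look like, with fs_rebuild_ring() as a hypothetical helper:

    static void
    fs_rebuild_ring(struct fs_priv *priv)
    {
            uint8_t i;

            /* Chain the subdevice array into a circle so the Rx hot
             * path can follow ->next without bounds checks. */
            for (i = priv->subs_head; i < priv->subs_tail; i++)
                    priv->subs[i].next = (i + 1 < priv->subs_tail) ?
                                         &priv->subs[i + 1] :
                                         &priv->subs[priv->subs_head];
    }
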
diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile
index 1129596f..b059a700 100644
--- a/drivers/net/fm10k/Makefile
+++ b/drivers/net/fm10k/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2013-2015 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/fm10k/base/fm10k_mbx.c b/drivers/net/fm10k/base/fm10k_mbx.c
index 16ab98d3..e766e45c 100644
--- a/drivers/net/fm10k/base/fm10k_mbx.c
+++ b/drivers/net/fm10k/base/fm10k_mbx.c
@@ -850,7 +850,7 @@ STATIC s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
- * This function copies the message from the the message array to mbmem
+ * This function copies the message from the message array to mbmem
**/
STATIC void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
{
diff --git a/drivers/net/fm10k/base/meson.build b/drivers/net/fm10k/base/meson.build
new file mode 100644
index 00000000..a8fc5fa8
--- /dev/null
+++ b/drivers/net/fm10k/base/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = [
+ 'fm10k_api.c',
+ 'fm10k_common.c',
+ 'fm10k_mbx.c',
+ 'fm10k_pf.c',
+ 'fm10k_tlv.c',
+ 'fm10k_vf.c'
+]
+
+error_cflags = ['-Wno-unused-parameter', '-Wno-unused-value',
+ '-Wno-strict-aliasing', '-Wno-format-extra-args',
+ '-Wno-unused-variable', '-Wno-missing-field-initializers'
+]
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('fm10k_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 1273aa86..30dad3e2 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
*/
#ifndef _FM10K_H_
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 2d05a466..94237610 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -1,37 +1,8 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2016 Intel Corporation
*/
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
@@ -54,7 +25,7 @@
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US 100000
/* A period of quiescence for switch */
-#define FM10K_SWITCH_QUIESCE_US 10000
+#define FM10K_SWITCH_QUIESCE_US 100000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
@@ -69,6 +40,9 @@
#define GLORT_FD_MASK GLORT_PF_MASK
#define GLORT_FD_INDEX GLORT_FD_Q_BASE
+int fm10k_logtype_init;
+int fm10k_logtype_driver;
+
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
@@ -533,9 +507,8 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev)
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
- if (dev->data->nb_rx_queues == 1 ||
- dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
- dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
+ if (dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
+ dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
return;
}
@@ -1242,7 +1215,7 @@ fm10k_dev_close(struct rte_eth_dev *dev)
MAX_LPORT_NUM, false);
fm10k_mbx_unlock(hw);
- /* allow 10ms for device to quiesce */
+ /* allow 100ms for device to quiesce */
rte_delay_us(FM10K_SWITCH_QUIESCE_US);
/* Stop mailbox service first */
@@ -2625,7 +2598,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
dev_info->sm_down = 0;
_rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
}
@@ -2638,7 +2611,7 @@ fm10k_dev_interrupt_handler_pf(void *param)
PMD_INIT_LOG(INFO, "INT: Switch is down");
dev_info->sm_down = 1;
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
/* Handle SRAM error */
@@ -2706,7 +2679,7 @@ fm10k_dev_interrupt_handler_vf(void *param)
/* Setting reset flag */
dev_info->sm_down = 1;
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
if (dev_info->sm_down == 1 &&
@@ -2735,7 +2708,7 @@ fm10k_dev_interrupt_handler_vf(void *param)
dev_info->sm_down = 0;
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
/* Re-enable interrupt from device side */
@@ -3263,3 +3236,15 @@ static struct rte_pci_driver rte_pmd_fm10k = {
RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(fm10k_init_log);
+static void
+fm10k_init_log(void)
+{
+ fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
+ if (fm10k_logtype_init >= 0)
+ rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE);
+ fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver");
+ if (fm10k_logtype_driver >= 0)
+ rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE);
+}
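The hunk above moves fm10k from compile-time log switches to DPDK's dynamic log framework: an RTE_INIT constructor registers each named log type and seeds its default level. A minimal sketch of the same registration pattern for a hypothetical PMD (mypmd_logtype and the name pmd.net.mypmd.driver are illustrative, not part of this patch):

    #include <rte_log.h>

    int mypmd_logtype; /* hypothetical log type id for an imaginary PMD */

    RTE_INIT(mypmd_init_log);
    static void
    mypmd_init_log(void)
    {
        /* register a name users can match from the EAL command line */
        mypmd_logtype = rte_log_register("pmd.net.mypmd.driver");
        if (mypmd_logtype >= 0)
            rte_log_set_level(mypmd_logtype, RTE_LOG_NOTICE);
    }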
diff --git a/drivers/net/fm10k/fm10k_logs.h b/drivers/net/fm10k/fm10k_logs.h
index 31384c9e..df56a51c 100644
--- a/drivers/net/fm10k/fm10k_logs.h
+++ b/drivers/net/fm10k/fm10k_logs.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
*/
#ifndef _FM10K_LOGS_H_
@@ -36,14 +7,12 @@
#include <rte_log.h>
+extern int fm10k_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+ rte_log(RTE_LOG_ ## level, fm10k_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
-#ifdef RTE_LIBRTE_FM10K_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -66,12 +35,10 @@
#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
#endif
-#ifdef RTE_LIBRTE_FM10K_DEBUG_DRIVER
+extern int fm10k_logtype_driver;
#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, fm10k_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
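The new log macros in this header rely on token pasting: the caller's bare level token (DEBUG, INFO, ...) is glued onto RTE_LOG_ to form the numeric constant rte_log() expects, and every message is routed through the registered log type. A minimal sketch of the idiom, reusing the hypothetical log type from the note above:

    #include <rte_log.h>

    extern int mypmd_logtype; /* registered elsewhere, see previous sketch */

    /* MYPMD_LOG(DEBUG, ...) expands to rte_log(RTE_LOG_DEBUG, ...) via ## */
    #define MYPMD_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, mypmd_logtype, \
            "%s(): " fmt "\n", __func__, ##args)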
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index d6081e48..9320748c 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -1,39 +1,10 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2016 Intel Corporation
*/
#include <inttypes.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_net.h>
#include "fm10k.h"
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index ce042d3d..498a1781 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -1,39 +1,10 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2015 Intel Corporation
*/
#include <inttypes.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include "fm10k.h"
#include "base/fm10k_type.h"
diff --git a/drivers/net/fm10k/meson.build b/drivers/net/fm10k/meson.build
new file mode 100644
index 00000000..2772ea4d
--- /dev/null
+++ b/drivers/net/fm10k/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'fm10k_ethdev.c',
+ 'fm10k_rxtx.c',
+)
+if arch_subdir == 'x86'
+ dpdk_conf.set('RTE_LIBRTE_FM10K_INC_VECTOR', 1)
+ sources += files('fm10k_rxtx_vec.c')
+endif
+
+includes += include_directories('base')
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 9ab8c84d..5663f5b1 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2017 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
@@ -114,6 +86,25 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c
+ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
+ CC_AVX2_SUPPORT=1
+else
+ CC_AVX2_SUPPORT=\
+ $(shell $(CC) -march=core-avx2 -dM -E - </dev/null 2>&1 | \
+ grep -q AVX2 && echo 1)
+ ifeq ($(CC_AVX2_SUPPORT), 1)
+ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+ CFLAGS_i40e_rxtx_vec_avx2.o += -march=core-avx2
+ else
+ CFLAGS_i40e_rxtx_vec_avx2.o += -mavx2
+ endif
+ endif
+endif
+
+ifeq ($(CC_AVX2_SUPPORT), 1)
+ SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_avx2.c
+endif
+
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_I40E_PMD)-include := rte_pmd_i40e.h
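The AVX2 block above probes the compiler by dumping its predefined macros under -march=core-avx2 and grepping for AVX2; a toolchain that passes the probe predefines __AVX2__, which is also the macro a source file can use to gate intrinsics. A minimal, illustrative translation unit (not part of the patch) that such a probe effectively certifies:

    #ifdef __AVX2__ /* predefined by GCC/clang under -march=core-avx2 */
    #include <immintrin.h>

    /* add eight packed 32-bit lanes in one AVX2 instruction */
    static inline __m256i
    add_8x32(__m256i a, __m256i b)
    {
        return _mm256_add_epi32(a, b);
    }
    #endif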
diff --git a/drivers/net/i40e/base/README b/drivers/net/i40e/base/README
index 59e76c21..247ba11d 100644
--- a/drivers/net/i40e/base/README
+++ b/drivers/net/i40e/base/README
@@ -34,7 +34,7 @@ Intel® I40E driver
==================
This directory contains source code of FreeBSD i40e driver of version
-cid-i40e.2017.06.23.tar.gz released by the team which develops
+cid-i40e.2018.01.02.tar.gz released by the team which develops
basic drivers for any i40e NIC. The directory of base/ contains the
original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index 8cc8c5ec..612be883 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -688,12 +688,18 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
(hw->aq.api_min_ver >= 7)))
hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
- if (hw->mac.type == I40E_MAC_XL710 &&
+ if (hw->mac.type == I40E_MAC_XL710 &&
hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
}
+ /* Newer versions of firmware require lock when reading the NVM */
+ if ((hw->aq.api_maj_ver > 1) ||
+ ((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver >= 5)))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
@@ -998,10 +1004,19 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+#ifdef PF_DRIVER
+ if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+#else
+ if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
+#endif
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
}
asq_send_command_error:
@@ -1063,22 +1078,19 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
-#ifdef PF_DRIVER
#ifdef INTEGRATED_VF
if (!i40e_is_vf(hw))
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+ else
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#else
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
-#endif /* INTEGRATED_VF */
+#ifdef PF_DRIVER
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
#endif /* PF_DRIVER */
#ifdef VF_DRIVER
-#ifdef INTEGRATED_VF
- if (i40e_is_vf(hw))
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
-#else
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
-#endif /* INTEGRATED_VF */
+ ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
#endif /* VF_DRIVER */
+#endif /* INTEGRATED_VF */
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -1137,7 +1149,7 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_use = ntu;
#ifdef PF_DRIVER
- i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
+ i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
#endif /* PF_DRIVER */
clean_arq_element_out:
/* Set pending if needed, unlock and return */
diff --git a/drivers/net/i40e/base/i40e_adminq.h b/drivers/net/i40e/base/i40e_adminq.h
index 182e40b9..de4ab3f3 100644
--- a/drivers/net/i40e/base/i40e_adminq.h
+++ b/drivers/net/i40e/base/i40e_adminq.h
@@ -159,9 +159,6 @@ STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
-#ifdef I40E_ESS_SUPPORT
-#define I40E_ASQ_CMD_TIMEOUT_ESS 50000000 /* usecs */
-#endif
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index c36da2a3..801c0ff1 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -214,6 +214,7 @@ enum i40e_admin_queue_opc {
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@@ -262,6 +263,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_nvm_update = 0x0703,
i40e_aqc_opc_nvm_config_read = 0x0704,
i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_nvm_progress = 0x0706,
i40e_aqc_opc_oem_post_update = 0x0720,
i40e_aqc_opc_thermal_sensor = 0x0721,
@@ -1877,6 +1879,7 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_AOC = 0x23,
I40E_PHY_TYPE_25GBASE_ACC = 0x24,
I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
I40E_PHY_TYPE_EMPTY = 0xFE,
I40E_PHY_TYPE_DEFAULT = 0xFF,
};
@@ -2182,8 +2185,8 @@ struct i40e_aqc_phy_register_access {
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
u8 dev_addres;
u8 reserved1[2];
- u32 reg_address;
- u32 reg_value;
+ __le32 reg_address;
+ __le32 reg_value;
u8 reserved2[4];
};
@@ -2195,8 +2198,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
+#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;
@@ -2456,6 +2463,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+/* Set DCB (direct 0x0303) */
+struct i40e_aqc_set_dcb_parameters {
+ u8 command;
+#define I40E_AQ_DCB_SET_AGENT 0x1
+#define I40E_DCB_VALID 0x1
+ u8 valid_flags;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
+
/* Get CEE DCBX Oper Config (0x0A07)
* uses the generic descriptor struct
* returns below as indirect response
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index 900d379c..e0a5be14 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -310,6 +310,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -1037,7 +1039,8 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
status = i40e_init_nvm(hw);
return status;
@@ -1393,7 +1396,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
break;
reg2 = rd32(hw, I40E_GLGEN_RSTAT);
if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
- DEBUGOUT("Core reset upcoming.\n");
+ DEBUGOUT("Core reset upcoming. Skipping PF reset request.\n");
DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
return I40E_ERR_NOT_READY;
}
@@ -1585,6 +1588,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1633,6 +1637,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
+ case I40E_LINK_ACTIVITY:
continue;
default:
break;
@@ -1643,9 +1648,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
- if (mode == I40E_LINK_ACTIVITY)
- blink = false;
-
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
@@ -1675,31 +1677,47 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
{
struct i40e_aq_desc desc;
enum i40e_status_code status;
+ u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
if (!abilities)
return I40E_ERR_PARAM;
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_phy_abilities);
+ do {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
- if (abilities_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- if (qualified_modules)
- desc.params.external.param0 |=
+ if (qualified_modules)
+ desc.params.external.param0 |=
CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
- if (report_init)
- desc.params.external.param0 |=
+ if (report_init)
+ desc.params.external.param0 |=
CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
- status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
- cmd_details);
+ status = i40e_asq_send_command(hw, &desc, abilities,
+ abilities_size, cmd_details);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
- status = I40E_ERR_UNKNOWN_PHY;
+ if (status != I40E_SUCCESS)
+ break;
+
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ i40e_msec_delay(1);
+ total_delay++;
+ status = I40E_ERR_TIMEOUT;
+ }
+ } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
+ (total_delay < max_delay));
+
+ if (status != I40E_SUCCESS)
+ return status;
if (report_init) {
if (hw->mac.type == I40E_MAC_XL710 &&
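The reworked i40e_aq_get_phy_capabilities above turns a one-shot AQ command into a bounded retry: on I40E_AQ_RC_EAGAIN it sleeps 1 ms, counts the attempt, and carries a provisional timeout status until the firmware answers or I40E_MAX_PHY_TIMEOUT attempts elapse. A self-contained sketch of the same retry shape, with a hypothetical op() callback and an illustrative bound:

    #include <errno.h>
    #include <unistd.h>

    #define MAX_TRIES 25 /* illustrative stand-in for I40E_MAX_PHY_TIMEOUT */

    /* op() returns 0 on success, -EAGAIN while the peer is busy */
    static int
    retry_bounded(int (*op)(void))
    {
        unsigned int tries = 0;
        int rc;

        do {
            rc = op();
            if (rc != -EAGAIN)
                return rc; /* success or a hard failure */
            usleep(1000); /* back off 1 ms, as the patch does */
        } while (++tries < MAX_TRIES);

        return -ETIMEDOUT; /* gave up, mirroring I40E_ERR_TIMEOUT */
    }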
@@ -1753,6 +1771,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
/**
* i40e_set_fc
* @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
*
* Set the requested flow control mode using set_phy_config.
**/
@@ -2005,7 +2025,11 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver >= 7) {
- hw->phy.phy_types = LE32_TO_CPU(*(__le32 *)resp->link_type);
+ __le32 tmp;
+
+ i40e_memcpy(&tmp, resp->link_type, sizeof(tmp),
+ I40E_NONDMA_TO_NONDMA);
+ hw->phy.phy_types = LE32_TO_CPU(tmp);
hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
}
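Replacing the *(__le32 *) cast with i40e_memcpy into a local fixes a potentially unaligned, aliasing-unsafe load from the byte array resp->link_type; the host-order conversion then operates on the safe copy. A portable sketch of the same idiom in plain C (le32toh() is from endian.h on glibc):

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t
    load_le32(const uint8_t *buf)
    {
        uint32_t tmp;

        /* memcpy is alignment- and aliasing-safe; compilers fold it
         * into a single load on targets that permit unaligned access */
        memcpy(&tmp, buf, sizeof(tmp));
        return le32toh(tmp);
    }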
@@ -3107,8 +3131,8 @@ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
* VEBs/VEPA elements only
@@ -3168,8 +3192,8 @@ static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
**/
@@ -3199,8 +3223,8 @@ enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
* add_mirrorrule.
* @mr_list: list of mirrored VLAN IDs to be removed
* @cmd_details: pointer to command details structure or NULL
- * @rule_used: Number of rules used in internal switch
- * @rule_free: Number of rules free in internal switch
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
*
* Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
**/
@@ -3603,6 +3627,8 @@ enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
/**
* i40e_aq_oem_post_update - triggers an OEM specific flow after update
* @hw: pointer to the hw struct
+ * @buff: buffer for result
+ * @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
**/
enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
@@ -4053,13 +4079,14 @@ exit:
* @length: length of the section to be written (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
+ * @preservation_flags: Preservation mode flags
* @cmd_details: pointer to command details structure or NULL
*
* Update the NVM using the admin queue commands
**/
enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -4080,6 +4107,16 @@ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
+ cmd->command_flags |=
+ (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
+ I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
+ }
cmd->module_pointer = module_pointer;
cmd->offset = CPU_TO_LE32(offset);
cmd->length = CPU_TO_LE16(length);
@@ -4095,6 +4132,28 @@ i40e_aq_update_nvm_exit:
}
/**
+ * i40e_aq_nvm_progress
+ * @hw: pointer to the hw struct
+ * @progress: pointer to progress returned from AQ
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Gets progress of flash rearrangement process
+ **/
+enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+
+ DEBUGFUNC("i40e_aq_nvm_progress");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ *progress = desc.params.raw[0];
+ return status;
+}
+
+/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
@@ -4408,7 +4467,34 @@ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_aq_set_dcb_parameters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ * @dcb_enable: True if DCB configuration needs to be applied
+ *
+ **/
+enum i40e_status_code
+i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_dcb_parameters *cmd =
+ (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_dcb_parameters);
+
+ if (dcb_enable) {
+ cmd->valid_flags = I40E_DCB_VALID;
+ cmd->command = I40E_AQ_DCB_SET_AGENT;
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -4476,7 +4562,6 @@ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
* @udp_port: the UDP port to add in Host byte order
- * @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
@@ -5219,6 +5304,7 @@ enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @seid: seid of the switching component connected to Physical Port
* @ets_data: Buffer holding ETS parameters
+ * @opcode: Tx scheduler AQ command opcode
* @cmd_details: pointer to command details structure or NULL
**/
enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
@@ -5581,10 +5667,10 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @seid: VSI seid to add ethertype filter from
**/
-#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 seid)
{
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
@@ -6165,6 +6251,7 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
* @ret_buff_size: actual buffer size returned
* @ret_next_table: next block to read
* @ret_next_index: next index to read
+ * @cmd_details: pointer to command details structure or NULL
*
* Dump internal FW/HW data for debug purposes.
*
@@ -6287,7 +6374,7 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
* i40e_read_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -6332,7 +6419,7 @@ enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
* i40e_write_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes specified PHY register value
@@ -6373,7 +6460,7 @@ enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -6447,7 +6534,7 @@ phy_read_end:
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@@ -6514,7 +6601,7 @@ phy_write_end:
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@@ -6550,7 +6637,7 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
- * @phy_adr: PHY address on MDIO interface
+ * @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@@ -6585,7 +6672,6 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
- * @phy_addr: Returned PHY address
*
* Gets PHY address for current port
**/
@@ -6672,6 +6758,64 @@ phy_blinking_end:
}
/**
+ * i40e_led_get_reg - read LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: read register value
+ **/
+static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val)
+{
+ enum i40e_status_code status;
+ u8 phy_addr = 0;
+
+ *reg_val = 0;
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ phy_addr = i40e_get_phy_address(hw, hw->port);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)reg_val);
+ }
+ return status;
+}
+
+/**
+ * i40e_led_set_reg - write LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: register value to write
+ **/
+static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val)
+{
+ enum i40e_status_code status;
+ u8 phy_addr = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status = i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ phy_addr = i40e_get_phy_address(hw, hw->port);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)reg_val);
+ }
+
+ return status;
+}
+
+/**
* i40e_led_get_phy - return current on/off mode
* @hw: pointer to the hw struct
* @led_addr: address of led register to use
@@ -6683,43 +6827,35 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
{
enum i40e_status_code status = I40E_SUCCESS;
u16 gpio_led_port;
+ u32 reg_val_aq;
+ u16 temp_addr;
u8 phy_addr = 0;
u16 reg_val;
- u16 temp_addr;
- u8 port_num;
- u32 i;
- u32 reg_val_aq;
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
- status =
- i40e_aq_get_phy_register(hw,
- I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
- I40E_PHY_LED_PROV_REG_1,
- &reg_val_aq, NULL);
- if (status)
- return status;
- *val = (u16)reg_val_aq;
- } else {
- temp_addr = I40E_PHY_LED_PROV_REG_1;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
- for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
- temp_addr++) {
- status =
- i40e_read_phy_register_clause45(hw,
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &reg_val_aq, NULL);
+ if (status == I40E_SUCCESS)
+ *val = (u16)reg_val_aq;
+ return status;
+ }
+ temp_addr = I40E_PHY_LED_PROV_REG_1;
+ phy_addr = i40e_get_phy_address(hw, hw->port);
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ temp_addr++) {
+ status = i40e_read_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
temp_addr, phy_addr,
&reg_val);
- if (status)
- return status;
- *val = reg_val;
- if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
- *led_addr = temp_addr;
- break;
- }
+ if (status)
+ return status;
+ *val = reg_val;
+ if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
+ *led_addr = temp_addr;
+ break;
}
}
return status;
@@ -6729,7 +6865,9 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
* i40e_led_set_phy
* @hw: pointer to the HW structure
* @on: true or false
+ * @led_addr: address of led register to use
* @mode: original val plus bit for set or ignore
+ *
* Set led's on or off when controlled by the PHY
*
**/
@@ -6739,113 +6877,35 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
enum i40e_status_code status = I40E_SUCCESS;
u32 led_ctl = 0;
u32 led_reg = 0;
- u8 phy_addr = 0;
- u8 port_num;
- u32 i;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
- status =
- i40e_aq_get_phy_register(hw,
- I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
- I40E_PHY_LED_PROV_REG_1,
- &led_reg, NULL);
- } else {
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- (u16 *)&led_reg);
- }
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
- status = i40e_aq_set_phy_register(hw,
- I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
- I40E_PHY_LED_PROV_REG_1,
- led_reg, NULL);
- } else {
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- (u16)led_reg);
- }
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
if (status)
return status;
}
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
- status =
- i40e_aq_get_phy_register(hw,
- I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
- I40E_PHY_LED_PROV_REG_1,
- &led_reg, NULL);
- } else {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- (u16 *)&led_reg);
- }
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
-
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
- status =
- i40e_aq_set_phy_register(hw,
- I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
- I40E_PHY_LED_PROV_REG_1,
- led_reg, NULL);
- } else {
- status =
- i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- (u16)led_reg);
- }
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
- status = i40e_aq_set_phy_register(hw,
- I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
- I40E_PHY_LED_PROV_REG_1,
- led_ctl, NULL);
- } else {
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- (u16)led_ctl);
- }
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
}
return status;
+
restore_config:
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
- status =
- i40e_aq_set_phy_register(hw,
- I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
- I40E_PHY_LED_PROV_REG_1,
- led_ctl, NULL);
- } else {
- status =
- i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- (u16)led_ctl);
- }
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
return status;
}
#endif /* PF_DRIVER */
@@ -7002,8 +7062,8 @@ enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
cmd->phy_interface = phy_select;
cmd->dev_addres = dev_addr;
- cmd->reg_address = reg_addr;
- cmd->reg_value = reg_val;
+ cmd->reg_address = CPU_TO_LE32(reg_addr);
+ cmd->reg_value = CPU_TO_LE32(reg_val);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -7036,11 +7096,11 @@ enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
cmd->phy_interface = phy_select;
cmd->dev_addres = dev_addr;
- cmd->reg_address = reg_addr;
+ cmd->reg_address = CPU_TO_LE32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status)
- *reg_val = cmd->reg_value;
+ *reg_val = LE32_TO_CPU(cmd->reg_value);
return status;
}
@@ -7111,9 +7171,9 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
- hw->dev_caps.dcb = msg->vf_offload_flags &
+ hw->dev_caps.dcb = msg->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_L2;
- hw->dev_caps.iwarp = (msg->vf_offload_flags &
+ hw->dev_caps.iwarp = (msg->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
@@ -7147,7 +7207,7 @@ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
/**
* i40e_aq_set_arp_proxy_config
* @hw: pointer to the HW structure
- * @proxy_config - pointer to proxy config command table struct
+ * @proxy_config: pointer to proxy config command table struct
* @cmd_details: pointer to command details
*
* Set ARP offload parameters from pre-populated
@@ -7333,7 +7393,6 @@ enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
return status;
}
-
/**
* i40e_aq_write_ddp - Write dynamic device personalization (ddp)
* @hw: pointer to the hw struct
@@ -7385,6 +7444,7 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
* @buff_size: buffer size in bytes
+ * @flags: AdminQ command flags
* @cmd_details: pointer to command details structure or NULL
**/
enum
diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c
index 9b5405db..7600c922 100644
--- a/drivers/net/i40e/base/i40e_dcb.c
+++ b/drivers/net/i40e/base/i40e_dcb.c
@@ -1277,6 +1277,67 @@ enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
/**
+ * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ * @module: address of the module pointer
+ * @word_offset: offset of LLDP configuration
+ *
+ * Reads the LLDP configuration data from NVM using passed addresses
+ **/
+static enum i40e_status_code _i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg,
+ u8 module, u32 word_offset)
+{
+ u32 address, offset = (2 * word_offset);
+ enum i40e_status_code ret;
+ u16 mem;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(mem), &mem, true,
+ NULL);
+ i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ /* Check if this pointer needs to be read in word size or 4K sector
+ * units.
+ */
+ if (mem & I40E_PTR_TYPE)
+ address = (0x7FFF & mem) * 4096;
+ else
+ address = (0x7FFF & mem) * 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, module, offset, sizeof(mem), &mem, true,
+ NULL);
+ i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ offset = mem + word_offset;
+ offset *= 2;
+
+ ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret != I40E_SUCCESS)
+ goto err_lldp_cfg;
+
+ ret = i40e_aq_read_nvm(hw, 0, address + offset,
+ sizeof(struct i40e_lldp_variables), lldp_cfg,
+ true, NULL);
+ i40e_release_nvm(hw);
+
+err_lldp_cfg:
+ return ret;
+}
+
+/**
* i40e_read_lldp_cfg - read LLDP Configuration data from NVM
* @hw: pointer to the HW structure
* @lldp_cfg: pointer to hold lldp configuration variables
@@ -1287,21 +1348,34 @@ enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg)
{
enum i40e_status_code ret = I40E_SUCCESS;
- u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR);
+ u32 mem;
if (!lldp_cfg)
return I40E_ERR_PARAM;
ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret != I40E_SUCCESS)
- goto err_lldp_cfg;
+ return ret;
- ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset,
- sizeof(struct i40e_lldp_variables),
- (u8 *)lldp_cfg,
- true, NULL);
+ ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem),
+ &mem, true, NULL);
i40e_release_nvm(hw);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ /* Read a bit that tells whether this is a flat or a structured NVM
+ * image. A flat image keeps the LLDP configuration in shadow RAM, so
+ * a different address must be passed in each case.
+ */
+ if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) {
+ /* Flat NVM case */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR,
+ I40E_SR_LLDP_CFG_PTR);
+ } else {
+ /* Good old structured NVM image */
+ ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR,
+ I40E_NVM_LLDP_CFG_PTR);
+ }
-err_lldp_cfg:
return ret;
}
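_i40e_read_lldp_cfg above decodes an NVM module pointer whose top bit (I40E_PTR_TYPE) selects the unit of the remaining 15 bits: 4 KB sectors when set, 16-bit words when clear. A worked sketch of the decode (the 0x8000 flag value is illustrative, inferred from the 0x7FFF mask in the patch):

    #include <stdint.h>

    #define NVM_PTR_TYPE 0x8000u /* illustrative: top bit = 4 KB sector units */
    #define NVM_PTR_MASK 0x7FFFu /* low 15 bits carry the pointer value */

    /* byte address of a module from its NVM pointer word */
    static uint32_t
    nvm_ptr_to_bytes(uint16_t ptr)
    {
        uint32_t units = ptr & NVM_PTR_MASK;

        /* 0x8002 -> 2 * 4096 = 8192 bytes; 0x0002 -> 2 * 2 = 4 bytes */
        return (ptr & NVM_PTR_TYPE) ? units * 4096u : units * 2u;
    }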
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index f4a87842..66ff1ccf 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -76,4 +76,7 @@ POSSIBILITY OF SUCH DAMAGE.
(d) == I40E_DEV_ID_QSFP_B || \
(d) == I40E_DEV_ID_QSFP_C)
+#define i40e_is_25G_device(d) ((d) == I40E_DEV_ID_25G_B || \
+ (d) == I40E_DEV_ID_25G_SFP28)
+
#endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/i40e/base/i40e_hmc.c b/drivers/net/i40e/base/i40e_hmc.c
index 75d38412..502407bd 100644
--- a/drivers/net/i40e/base/i40e_hmc.c
+++ b/drivers/net/i40e/base/i40e_hmc.c
@@ -210,7 +210,6 @@ exit:
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
- * @is_pf: distinguishes a VF from a PF
*
* This function:
* 1. Marks the entry in pd table (for paged address mode) or in sd table
diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index a1e78300..c77dac02 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -33,18 +33,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_prototype.h"
-enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data);
-enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
- u16 *data);
-enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data);
-enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data);
-enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
- bool last_command);
-
/**
* i40e_init_nvm_ops - Initialize NVM function pointers
* @hw: pointer to the HW structure
@@ -207,52 +195,6 @@ static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
/**
- * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @data: word read from the Shadow RAM
- *
- * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
- **/
-enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
-{
- enum i40e_status_code ret_code = I40E_SUCCESS;
-
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
- }
- i40e_release_nvm(hw);
- }
- return ret_code;
-}
-
-/**
- * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @data: word read from the Shadow RAM
- *
- * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
- **/
-enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
- u16 offset,
- u16 *data)
-{
- enum i40e_status_code ret_code = I40E_SUCCESS;
-
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
- return ret_code;
-}
-
-/**
* i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -260,8 +202,9 @@ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data)
+STATIC enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw,
+ u16 offset,
+ u16 *data)
{
enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
@@ -303,15 +246,68 @@ read_nvm_exit:
}
/**
+ * i40e_read_nvm_aq - Read Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer for the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ DEBUGFUNC("i40e_read_nvm_aq");
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can read only up to 4KB (one sector) in one AQ command */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read fail error: tried to read %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single read cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
+
+/**
* i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
- * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ.
**/
-enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
- u16 *data)
+STATIC enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
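The relocated i40e_read_nvm_aq rejects spans that run past the shadow RAM, exceed one 4 KB sector, or straddle a sector boundary; the last check compares the sector index of the first and last word. A self-contained sketch of that boundary test, assuming 2048 16-bit words per sector (the value of I40E_SR_SECTOR_SIZE_IN_WORDS):

    #include <stdbool.h>
    #include <stdint.h>

    #define SECTOR_WORDS 2048u /* one 4 KB sector = 2048 16-bit words */

    /* true if [offset, offset + words) stays inside a single sector */
    static bool
    fits_one_sector(uint32_t offset, uint16_t words)
    {
        if (words == 0 || words > SECTOR_WORDS)
            return false;
        /* the first and last word must fall in the same sector */
        return (offset / SECTOR_WORDS) ==
               ((offset + words - 1u) / SECTOR_WORDS);
    }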
@@ -324,55 +320,49 @@ enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
}
/**
- * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
+ * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
* @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
*
- * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
- * method. The buffer read is preceded by the NVM ownership take
- * and followed by the release.
+ * Reads one 16 bit word from the Shadow RAM.
+ *
+ * Do not use this function except in cases where the nvm lock is already
+ * taken via i40e_acquire_nvm().
**/
-enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
- u16 offset,
- u16 *words, u16 *data)
+enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
+ u16 offset,
+ u16 *data)
{
- enum i40e_status_code ret_code = I40E_SUCCESS;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
- else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
- return ret_code;
+ return i40e_read_nvm_word_aq(hw, offset, data);
+
+ return i40e_read_nvm_word_srctl(hw, offset, data);
}
/**
- * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acuire lock if necessary
+ * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
* @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
*
- * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
- * method. The buffer read is preceded by the NVM ownership take
- * and followed by the release.
+ * Reads one 16 bit word from the Shadow RAM.
**/
-enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
- data);
- i40e_release_nvm(hw);
- }
- } else {
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
- }
+
+ if (ret_code)
+ return ret_code;
+ ret_code = __i40e_read_nvm_word(hw, offset, data);
+
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ i40e_release_nvm(hw);
return ret_code;
}
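For reference, a hedged usage sketch (the caller below is hypothetical, not part of this patch): when several words are read back to back, take the NVM resource once and use the double-underscore variant, which is the pattern i40e_calc_nvm_checksum follows later in this file:

    static enum i40e_status_code
    read_two_words(struct i40e_hw *hw, u16 *a, u16 *b)
    {
            enum i40e_status_code ret;

            ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
            if (ret)
                    return ret;
            /* lock held: the __ variants must not re-acquire it */
            ret = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, a);
            if (!ret)
                    ret = __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, b);
            i40e_release_nvm(hw);
            return ret;
    }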
@@ -387,8 +377,8 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+STATIC enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
u16 index, word;
@@ -420,8 +410,8 @@ enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+STATIC enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
enum i40e_status_code ret_code;
u16 read_size = *words;
@@ -469,53 +459,51 @@ read_nvm_buffer_aq_exit:
}
/**
- * i40e_read_nvm_aq - Read Shadow RAM.
- * @hw: pointer to the HW structure.
- * @module_pointer: module pointer location in words from the NVM beginning
- * @offset: offset in words from module start
- * @words: number of words to write
- * @data: buffer with words to write to the Shadow RAM
- * @last_command: tells the AdminQ that this is the last command
+ * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
*
- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method.
**/
-enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
- bool last_command)
+enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
+ u16 offset,
+ u16 *words, u16 *data)
{
- enum i40e_status_code ret_code = I40E_ERR_NVM;
- struct i40e_asq_cmd_details cmd_details;
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
- DEBUGFUNC("i40e_read_nvm_aq");
+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires the lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
- /* Here we are checking the SR limit only for the flat memory model.
- * We cannot do it for the module-based model, as we did not acquire
- * the NVM resource yet (we cannot get the module pointer value).
- * Firmware will check the module-based model.
- */
- if ((offset + words) > hw->nvm.sr_size)
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write error: offset %d beyond Shadow RAM limit %d\n",
- (offset + words), hw->nvm.sr_size);
- else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
- /* We can write only up to 4KB (one sector), in one AQ write */
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write fail error: tried to write %d words, limit is %d.\n",
- words, I40E_SR_SECTOR_SIZE_IN_WORDS);
- else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
- != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
- /* A single write cannot spread over two sectors */
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
- offset, words);
- else
- ret_code = i40e_aq_read_nvm(hw, module_pointer,
- 2 * offset, /*bytes*/
- 2 * words, /*bytes*/
- data, last_command, &cmd_details);
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
return ret_code;
}
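In summary, the read-path lock policy after this change, as implied by the functions above (a descriptive note, not code from the patch):

    /*
     * i40e_read_nvm_word()   - locks iff I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK
     * i40e_read_nvm_buffer() - locks iff I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE
     * __i40e_read_nvm_word() / __i40e_read_nvm_buffer()
     *                        - never lock; the caller holds the NVM resource
     */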
@@ -561,7 +549,8 @@ enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
- data, last_command, &cmd_details);
+ data, last_command, 0,
+ &cmd_details);
return ret_code;
}
@@ -650,16 +639,14 @@ enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
data = (u16 *)vmem.va;
/* read pointer to VPD area */
- ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
- &vpd_module);
+ ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
/* read pointer to PCIe Alt Auto-load module */
- ret_code = __i40e_read_nvm_word(hw,
- I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
@@ -749,25 +736,19 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
DEBUGFUNC("i40e_validate_nvm_checksum");
- /* acquire_nvm provides exclusive NVM lock to synchronize access across
- * PFs. X710 uses i40e_read_nvm_word_srctl which polls for done bit
- * twice (first time to be able to write address to I40E_GLNVM_SRCTL
- * register, second to read data from I40E_GLNVM_SRDATA. One PF can see
- * done bit and try to write address, while another one will interpret
- * it as a good time to read data. It will cause invalid data to be
- * read.
+ /* We must acquire the NVM lock in order to correctly synchronize the
+	 * NVM accesses across multiple PFs. Without doing so, it is possible
+ * for one of the PFs to read invalid data potentially indicating that
+ * the checksum is invalid.
*/
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ if (ret_code)
+ return ret_code;
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
i40e_release_nvm(hw);
- if (ret_code != I40E_SUCCESS)
- goto i40e_validate_nvm_checksum_exit;
- } else {
- goto i40e_validate_nvm_checksum_exit;
- }
-
- i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ if (ret_code)
+ return ret_code;
/* Verify read checksum from EEPROM is the same as
* calculated checksum
@@ -779,7 +760,6 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
if (checksum)
*checksum = checksum_local;
-i40e_validate_nvm_checksum_exit:
return ret_code;
}
@@ -810,6 +790,9 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *perrno);
+STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -819,6 +802,12 @@ STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
+STATIC INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
+{
+ return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
+ I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+}
+
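Together with i40e_nvmupd_get_module() and i40e_nvmupd_get_transaction() above, the config word now decodes into three fields. A worked example using the masks added later in this patch (bits 7:0 module pointer, bits 11:8 transaction, bits 13:12 preservation flags):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t config = 0x1208;

            assert((config & 0xff) == 0x08);       /* module pointer */
            assert(((config >> 8) & 0xf) == 0x2);  /* I40E_NVM_LCB */
            assert(((config >> 12) & 0x3) == 0x1); /* FLAGS_SELECTED */
            return 0;
    }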
STATIC const char *i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_INVALID",
"I40E_NVMUPD_READ_CON",
@@ -836,6 +825,7 @@ STATIC const char *i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_STATUS",
"I40E_NVMUPD_EXEC_AQ",
"I40E_NVMUPD_GET_AQ_RESULT",
+ "I40E_NVMUPD_GET_AQ_EVENT",
};
/**
@@ -907,7 +897,11 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
/* Acquire lock to prevent race condition where adminq_task
* can execute after i40e_nvmupd_nvm_read/write but before state
- * variables (nvm_wait_opcode, nvm_release_on_done) are updated
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
+ *
+	 * During NVMUpdate, it is observed that the lock could be held for
+	 * ~5ms for most commands. However, the lock is held for ~60ms for
+	 * the NVMUPD_CSUM_LCB command.
*/
i40e_acquire_spinlock(&hw->aq.arq_spinlock);
switch (hw->nvmupd_state) {
@@ -929,8 +923,9 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
* the wait info and return before doing anything else
*/
if (cmd->offset == 0xffff) {
- i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
- return I40E_SUCCESS;
+ i40e_nvmupd_clear_wait_state(hw);
+ status = I40E_SUCCESS;
+ break;
}
status = I40E_ERR_NOT_READY;
@@ -945,6 +940,7 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*perrno = -ESRCH;
break;
}
+
i40e_release_spinlock(&hw->aq.arq_spinlock);
return status;
}
@@ -1075,6 +1071,10 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
break;
+ case I40E_NVMUPD_GET_AQ_EVENT:
+ status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
+ break;
+
default:
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in init state\n",
@@ -1253,39 +1253,55 @@ retry:
}
/**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
* @hw: pointer to the hardware structure
- * @opcode: the event that just happened
**/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
- if (opcode == hw->nvm_wait_opcode) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
- if (hw->nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->nvm_release_on_done = false;
- }
- hw->nvm_wait_opcode = 0;
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
- if (hw->aq.arq_last_status) {
- hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
- return;
- }
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
- default:
- break;
- }
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ * @desc: AdminQ descriptor
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc)
+{
+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
+
+ if (opcode == hw->nvm_wait_opcode) {
+ i40e_memcpy(&hw->nvm_aq_event_desc, desc,
+ aq_desc_len, I40E_NONDMA_TO_NONDMA);
+ i40e_nvmupd_clear_wait_state(hw);
}
}
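A hedged sketch of the intended caller (the ARQ-processing fragment below is assumed, not part of this hunk): the completed event's descriptor is now forwarded so it can be stashed for a later I40E_NVMUPD_GET_AQ_EVENT query:

    /* fragment of a hypothetical admin-receive-queue handler */
    struct i40e_arq_event_info e;
    u16 opcode;

    /* ... after i40e_clean_arq_element(hw, &e, &pending) succeeds ... */
    opcode = LE16_TO_CPU(e.desc.opcode);
    i40e_nvmupd_check_wait_event(hw, opcode, &e.desc);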
@@ -1343,6 +1359,9 @@ STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
else if (module == 0)
upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
}
break;
@@ -1405,6 +1424,9 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
u32 aq_data_len;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return I40E_SUCCESS;
+
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -1441,6 +1463,9 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
}
}
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
/* and away we go! */
status = i40e_asq_send_command(hw, aq_desc, buff,
buff_size, &cmd_details);
@@ -1450,6 +1475,7 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
}
/* should we wait for a followup event? */
@@ -1531,6 +1557,41 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
}
/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
+ I40E_NONDMA_TO_NONDMA);
+
+ return I40E_SUCCESS;
+}
+
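The clamp above in numbers (sizes assumed for illustration; struct i40e_aq_desc is 32 bytes in this codebase):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t aq_desc_len = 32;                /* descriptor size */
            uint32_t aq_total_len = aq_desc_len + 16; /* + datalen */
            uint32_t data_size = 64;                  /* caller's request */

            if (data_size > aq_total_len)
                    data_size = aq_total_len;         /* trimmed */
            assert(data_size == 48);
            return 0;
    }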
+/**
* i40e_nvmupd_nvm_read - Read NVM
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1625,18 +1686,20 @@ STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
enum i40e_status_code status = I40E_SUCCESS;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
+ u8 preservation_flags;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
status = i40e_aq_update_nvm(hw, module, cmd->offset,
(u16)cmd->data_size, bytes, last,
- &cmd_details);
+ preservation_flags, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index acb2023f..c6ec2d76 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -263,7 +263,9 @@ enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
- bool last_command,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
@@ -290,6 +292,10 @@ enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
+ struct i40e_asq_cmd_details
+ *cmd_details);
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
@@ -481,7 +487,9 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc);
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
#endif /* PF_DRIVER */
@@ -496,6 +504,38 @@ STATIC INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
return i40e_ptype_lookup[ptype];
}
+#ifdef PF_DRIVER
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the link_speed in terms of the virtchnl interface, for use in
+ * converting link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If we can't represent the link speed properly,
+ * report LINK_SPEED_UNKNOWN.
+ **/
+STATIC INLINE enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+ switch (link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ return VIRTCHNL_LINK_SPEED_100MB;
+ case I40E_LINK_SPEED_1GB:
+ return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_10GB:
+ return VIRTCHNL_LINK_SPEED_10GB;
+ case I40E_LINK_SPEED_40GB:
+ return VIRTCHNL_LINK_SPEED_40GB;
+ case I40E_LINK_SPEED_20GB:
+ return VIRTCHNL_LINK_SPEED_20GB;
+ case I40E_LINK_SPEED_25GB:
+ return VIRTCHNL_LINK_SPEED_25GB;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+ }
+}
+#endif /* PF_DRIVER */
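A minimal usage sketch (the notification helper below is hypothetical): translate the AdminQ-reported speed before exposing it to a VF:

    static void notify_vf_speed(struct i40e_hw *hw)
    {
            enum virtchnl_link_speed speed =
                    i40e_virtchnl_link_speed(hw->phy.link_info.link_speed);

            /* 'speed' would be placed into the virtchnl link event */
            (void)speed;
    }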
/* prototype for functions used for SW spinlocks */
void i40e_init_spinlock(struct i40e_spinlock *sp);
void i40e_acquire_spinlock(struct i40e_spinlock *sp);
diff --git a/drivers/net/i40e/base/i40e_status.h b/drivers/net/i40e/base/i40e_status.h
index 5632ff2b..49af2d9f 100644
--- a/drivers/net/i40e/base/i40e_status.h
+++ b/drivers/net/i40e/base/i40e_status.h
@@ -102,6 +102,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
+ I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index dca725af..006a11a8 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -77,6 +77,9 @@ POSSIBILITY OF SUCH DAMAGE.
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT 500
+
/* Check whether address is multicast. */
#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
@@ -351,6 +354,10 @@ struct i40e_phy_info {
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
+ I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
@@ -483,6 +490,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
+ I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@@ -502,15 +510,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
+#define I40E_NVM_PRESERVATION_FLAGS_MASK \
+ (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
+#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
+#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_AQE 0xe
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
@@ -526,6 +540,19 @@ struct i40e_nvm_access {
u8 data[1];
};
+/* (Q)SFP module access definitions */
+#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
+#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
+#define I40E_MODULE_TYPE_ADDR 0x00
+#define I40E_MODULE_REVISION_ADDR 0x01
+#define I40E_MODULE_SFF_8472_COMP 0x5E
+#define I40E_MODULE_SFF_8472_SWAP 0x5C
+#define I40E_MODULE_SFF_ADDR_MODE 0x04
+#define I40E_MODULE_SFF_DIAG_CAPAB 0x40
+#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
+#define I40E_MODULE_TYPE_QSFP28 0x11
+#define I40E_MODULE_QSFP_MAX_LEN 640
+
/* PCI bus types */
enum i40e_bus_type {
i40e_bus_type_unknown = 0,
@@ -680,6 +707,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@@ -702,6 +730,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
u64 flags;
/* Used in set switch config AQ command */
@@ -1468,7 +1497,8 @@ struct i40e_hw_port_stats {
#define I40E_SR_PE_IMAGE_PTR 0x0C
#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
#define I40E_SR_MNG_CONFIG_PTR 0x0E
-#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_EMP_MODULE_PTR 0x0F
+#define I40E_SR_EMP_MODULE_PTR 0x48
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_BOOT_CONFIG_PTR 0x17
@@ -1509,6 +1539,9 @@ struct i40e_hw_port_stats {
#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
+#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
+#define I40E_PTR_TYPE BIT(15)
/* Shadow RAM related */
#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
@@ -1826,7 +1859,8 @@ enum i40e_reset_type {
};
/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR 0xD
+#define I40E_NVM_LLDP_CFG_PTR 0x06
+#define I40E_SR_LLDP_CFG_PTR 0x31
struct i40e_lldp_variables {
u16 length;
u16 adminstatus;
diff --git a/drivers/net/i40e/base/meson.build b/drivers/net/i40e/base/meson.build
new file mode 100644
index 00000000..401a1477
--- /dev/null
+++ b/drivers/net/i40e/base/meson.build
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = [
+ 'i40e_adminq.c',
+ 'i40e_common.c',
+ 'i40e_dcb.c',
+ 'i40e_diag.c',
+ 'i40e_hmc.c',
+ 'i40e_lan_hmc.c',
+ 'i40e_nvm.c'
+]
+
+error_cflags = ['-Wno-sign-compare', '-Wno-unused-value',
+ '-Wno-format', '-Wno-unused-but-set-variable',
+ '-Wno-strict-aliasing'
+]
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('i40e_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/net/i40e/base/virtchnl.h b/drivers/net/i40e/base/virtchnl.h
index f00dd360..b2d5fe73 100644
--- a/drivers/net/i40e/base/virtchnl.h
+++ b/drivers/net/i40e/base/virtchnl.h
@@ -240,7 +240,7 @@ struct virtchnl_vsi_resource {
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
-/* VF offload flags
+/* VF capability flags
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
@@ -269,7 +269,7 @@ struct virtchnl_vf_resource {
u16 max_vectors;
u16 max_mtu;
- u32 vf_offload_flags;
+ u32 vf_cap_flags;
u32 rss_key_size;
u32 rss_lut_size;
@@ -349,8 +349,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
* additional queues must be negotiated. This is a best effort request as it
* is possible the PF does not have enough queues left to support the request.
* If the PF cannot support the number requested it will respond with the
- * maximum number it is able to support; otherwise it will respond with the
- * number requested.
+ * maximum number it is able to support. If the request is successful, PF will
+ * then reset the VF to institute required changes.
*/
/* VF resource request */
@@ -509,7 +509,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
+ u8 lut[1]; /* RSS lookup table */
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
@@ -764,7 +764,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
return VIRTCHNL_ERR_PARAM;
}
/* few more checks */
- if ((valid_len != msglen) || (err_msg_format))
+ if (err_msg_format || valid_len != msglen)
return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
return 0;
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 811cc9ff..508b4171 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <stdio.h>
@@ -45,7 +16,7 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
@@ -684,6 +655,15 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
return 0;
}
+static inline void
+i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+{
+ i40e_write_rx_ctl(hw, reg_addr, reg_val);
+ PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
+ "with value 0x%08x",
+ reg_addr, reg_val);
+}
+
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
@@ -701,24 +681,31 @@ RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
/*
- * Force global configuration for flexible payload
- * to the first 16 bytes of the corresponding L2/L3/L4 paylod.
- * This should be removed from code once proper
- * configuration API is added to avoid configuration conflicts
- * between ports of the same device.
- */
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
-
- /*
* Initialize registers for parsing packet type of QinQ
* This should be removed from code once proper
* configuration API is added to avoid configuration conflicts
* between ports of the same device.
*/
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
- I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
+ i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
+}
+
+static inline void i40e_config_automask(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t val;
+
+ /* INTENA flag is not auto-cleared for interrupt */
+ val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+ val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+ I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+
+	/* If multi-driver support is enabled, the PF will use INT0. */
+ if (!pf->support_multi_driver)
+ val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
+
+ I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
}
#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
@@ -1006,7 +993,7 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
struct rte_hash_parameters fdir_hash_params = {
.name = fdir_hash_name,
.entries = I40E_MAX_FDIR_FILTER_NUM,
- .key_len = sizeof(struct rte_eth_fdir_input),
+ .key_len = sizeof(struct i40e_fdir_input),
.hash_func = rte_hash_crc,
.hash_func_init_val = 0,
.socket_id = rte_socket_id(),
@@ -1068,6 +1055,68 @@ i40e_init_queue_region_conf(struct rte_eth_dev *dev)
memset(info, 0, sizeof(struct i40e_queue_regions));
}
+#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
+
+static int
+i40e_parse_multi_drv_handler(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct i40e_pf *pf;
+ unsigned long support_multi_driver;
+ char *end;
+
+ pf = (struct i40e_pf *)opaque;
+
+ errno = 0;
+ support_multi_driver = strtoul(value, &end, 10);
+ if (errno != 0 || end == value || *end != 0) {
+ PMD_DRV_LOG(WARNING, "Wrong global configuration");
+ return -(EINVAL);
+ }
+
+ if (support_multi_driver == 1 || support_multi_driver == 0)
+ pf->support_multi_driver = (bool)support_multi_driver;
+ else
+		PMD_DRV_LOG(WARNING, "%s must be 1 or 0, "
+			    "enable global configuration by default.",
+			    ETH_I40E_SUPPORT_MULTI_DRIVER);
+ return 0;
+}
+
+static int
+i40e_support_multi_driver(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ static const char *const valid_keys[] = {
+ ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
+ struct rte_kvargs *kvlist;
+
+ /* Enable global configuration by default */
+ pf->support_multi_driver = false;
+
+ if (!dev->device->devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+
+ if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
+		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" given; only "
+			    "the first invalid or the last valid one is used!",
+ ETH_I40E_SUPPORT_MULTI_DRIVER);
+
+ if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
+ i40e_parse_multi_drv_handler, pf) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
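A standalone model of the validation in i40e_parse_multi_drv_handler() (helper name hypothetical): only a clean decimal 0 or 1 is accepted, anything else keeps the default. With the devargs key above, this corresponds to passing support-multi-driver=1 in the device arguments:

    #include <errno.h>
    #include <stdlib.h>

    static int parse_bool_arg(const char *value, int *out)
    {
            char *end;
            unsigned long v;

            errno = 0;
            v = strtoul(value, &end, 10);
            if (errno != 0 || end == value || *end != '\0')
                    return -1;      /* not a clean number: reject */
            if (v > 1)
                    return -1;      /* out of range: keep default */
            *out = (int)v;
            return 0;
    }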
static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
@@ -1096,7 +1145,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
return 0;
}
i40e_set_default_ptype_table(dev);
- i40e_set_default_pctype_table(dev);
pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
@@ -1122,6 +1170,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
hw->bus.func = pci_dev->addr.function;
hw->adapter_stopped = 0;
+ /* Check if need to support multi-driver */
+ i40e_support_multi_driver(dev);
+
/* Make sure all is clean before doing PF reset */
i40e_clear_hw(hw);
@@ -1142,13 +1193,17 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
return ret;
}
+ i40e_config_automask(pf);
+
+ i40e_set_default_pctype_table(dev);
+
/*
* To work around the NVM issue, initialize registers
- * for flexible payload and packet type of QinQ by
- * software. It should be removed once issues are fixed
- * in NVM.
+ * for packet type of QinQ by software.
+ * It should be removed once issues are fixed in NVM.
*/
- i40e_GLQF_reg_init(hw);
+ if (!pf->support_multi_driver)
+ i40e_GLQF_reg_init(hw);
/* Initialize the input set for filters (hash and fd) to default value */
i40e_filter_input_set_init(pf);
@@ -1168,10 +1223,17 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
(hw->nvm.version & 0xf), hw->nvm.eetrack);
/* initialise the L3_MAP register */
- ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
- 0x00000028, NULL);
- if (ret)
- PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret);
+ if (!pf->support_multi_driver) {
+ ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
+ 0x00000028, NULL);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
+ ret);
+ PMD_INIT_LOG(DEBUG,
+ "Global register 0x%08x is changed with 0x28",
+ I40E_GLQF_L3_MAP(40));
+ i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER);
+ }
/* Need the special FW version to support floating VEB */
config_floating_veb(dev);
@@ -1247,11 +1309,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
i40e_set_fc(hw, &aq_fail, TRUE);
/* Set the global registers with default ether type value */
- ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
- if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(ERR,
- "Failed to set the default outer VLAN ether type");
- goto err_setup_pf_switch;
+ if (!pf->support_multi_driver) {
+ ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ ETHER_TYPE_VLAN);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR,
+ "Failed to set the default outer "
+ "VLAN ether type");
+ goto err_setup_pf_switch;
+ }
}
/* PF setup, which includes VSI setup */
@@ -1315,6 +1381,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+
+ /* By default disable flexible payload in global configuration */
+ if (!pf->support_multi_driver)
+ i40e_flex_payload_reg_set_default(hw);
+
/*
* Add an ethertype filter to drop all flow control frames transmitted
* from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
@@ -1349,6 +1420,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize queue region configuration */
i40e_init_queue_region_conf(dev);
+ /* initialize rss configuration from rte_flow */
+ memset(&pf->rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+
return 0;
err_init_fdir_filter_list:
@@ -1435,6 +1510,18 @@ i40e_rm_fdir_filter_list(struct i40e_pf *pf)
}
}
+void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
+{
+ /*
+ * Disable by default flexible payload
+ * for corresponding L2/L3/L4 layers.
+ */
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
+ i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD);
+}
+
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
@@ -1640,6 +1727,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
int i;
uint32_t val;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
/* Bind all RX queues to allocated MSIX interrupt */
for (i = 0; i < nb_queue; i++) {
@@ -1658,7 +1746,8 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
/* Write first RX queue to Link list register as the head element */
if (vsi->type != I40E_VSI_SRIOV) {
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1,
+ pf->support_multi_driver);
if (msix_vect == I40E_MISC_VEC_ID) {
I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
@@ -1717,7 +1806,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
uint16_t queue_idx = 0;
int record = 0;
- uint32_t val;
int i;
for (i = 0; i < vsi->nb_qps; i++) {
@@ -1725,13 +1813,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
}
- /* INTENA flag is not auto-cleared for interrupt */
- val = I40E_READ_REG(hw, I40E_GLINT_CTL);
- val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
- I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
- I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
- I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
-
/* VF bind interrupt */
if (vsi->type == I40E_VSI_SRIOV) {
__vsi_queues_bind_intr(vsi, msix_vect,
@@ -1788,27 +1869,22 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
- uint16_t interval = i40e_calc_itr_interval(\
- RTE_LIBRTE_I40E_ITR_INTERVAL);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
uint16_t msix_intr, i;
- if (rte_intr_allow_others(intr_handle))
+ if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
for (i = 0; i < vsi->nb_msix; i++) {
msix_intr = vsi->msix_intr + i;
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
}
else
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
}
@@ -1820,16 +1896,18 @@ i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
uint16_t msix_intr, i;
- if (rte_intr_allow_others(intr_handle))
+ if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
for (i = 0; i < vsi->nb_msix; i++) {
msix_intr = vsi->msix_intr + i;
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
- 0);
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
}
else
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
}
@@ -2048,6 +2126,16 @@ i40e_dev_start(struct rte_eth_dev *dev)
}
}
+ /* Enable mac loopback mode */
+ if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
+ dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
+ ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
+ if (ret != I40E_SUCCESS) {
+			PMD_DRV_LOG(ERR, "failed to set loopback link");
+ goto err_up;
+ }
+ }
+
/* Apply link configure */
if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
@@ -2154,9 +2242,6 @@ i40e_dev_stop(struct rte_eth_dev *dev)
/* reset hierarchy commit */
pf->tm_conf.committed = false;
- /* Remove all the queue region configuration */
- i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
-
hw->adapter_stopped = 1;
}
@@ -2222,6 +2307,10 @@ i40e_dev_close(struct rte_eth_dev *dev)
i40e_res_pool_destroy(&pf->qp_pool);
i40e_res_pool_destroy(&pf->msix_pool);
+ /* Disable flexible payload in global configuration */
+ if (!pf->support_multi_driver)
+ i40e_flex_payload_reg_set_default(hw);
+
/* force a PF reset to clean anything leftover */
reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
@@ -2531,6 +2620,22 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
pf->offset_loaded,
&pf->internal_stats_offset.rx_broadcast,
&pf->internal_stats.rx_broadcast);
+ /* Get total internal tx packet count */
+ i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
+ I40E_GLV_UPTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_unicast,
+ &pf->internal_stats.tx_unicast);
+ i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
+ I40E_GLV_MPTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_multicast,
+ &pf->internal_stats.tx_multicast);
+ i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
+ I40E_GLV_BPTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_stats_offset.tx_broadcast,
+ &pf->internal_stats.tx_broadcast);
/* exclude CRC size */
pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
@@ -2560,16 +2665,32 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
ns->eth.rx_broadcast) * ETHER_CRC_LEN;
- /* Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
- * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negtive
+ /* exclude internal rx bytes
+	 * Workaround: it is possible I40E_GLV_GORC[H/L] is updated before
+	 * I40E_GLPRT_GORC[H/L], so there is a small window that causes a
+	 * negative value.
+	 * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and
+	 * I40E_GLV_BPRC[H/L].
*/
if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
ns->eth.rx_bytes = 0;
- /* exlude internal rx bytes */
else
ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
+ if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
+ ns->eth.rx_unicast = 0;
+ else
+ ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
+
+ if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
+ ns->eth.rx_multicast = 0;
+ else
+ ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
+
+ if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
+ ns->eth.rx_broadcast = 0;
+ else
+ ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
+
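Each of the guards above is a saturating subtraction; a helper form (hypothetical, not in this patch) states the intent:

    #include <stdint.h>

    /* VSI counters can momentarily run ahead of the port counters, so
     * clamp at zero instead of letting the difference wrap around. */
    static inline uint64_t sat_sub(uint64_t port_cnt, uint64_t internal_cnt)
    {
            return port_cnt < internal_cnt ? 0 : port_cnt - internal_cnt;
    }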
i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
pf->offset_loaded, &os->eth.rx_discards,
&ns->eth.rx_discards);
@@ -2598,12 +2719,32 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
ns->eth.tx_broadcast) * ETHER_CRC_LEN;
- /* exclude internal tx bytes */
+ /* exclude internal tx bytes
+	 * Workaround: it is possible I40E_GLV_GOTC[H/L] is updated before
+	 * I40E_GLPRT_GOTC[H/L], so there is a small window that causes a
+	 * negative value.
+	 * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and
+	 * I40E_GLV_BPTC[H/L].
+ */
if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
ns->eth.tx_bytes = 0;
else
ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
+ if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
+ ns->eth.tx_unicast = 0;
+ else
+ ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
+
+ if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
+ ns->eth.tx_multicast = 0;
+ else
+ ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
+
+ if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
+ ns->eth.tx_broadcast = 0;
+ else
+ ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
+
/* GLPRT_TEPC not supported */
/* additional port specific stats */
@@ -3040,7 +3181,9 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_QINQ_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3173,8 +3316,8 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
return -EIO;
}
PMD_DRV_LOG(DEBUG,
- "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
- reg_w, reg_id);
+ "Global register 0x%08x is changed with value 0x%08x",
+ I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
return 0;
}
@@ -3185,6 +3328,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
uint16_t tpid)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
int ret = 0;
@@ -3195,6 +3339,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
"Unsupported vlan type.");
return -EINVAL;
}
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
+ return -ENOTSUP;
+ }
+
/* 802.1ad frames ability is added in NVM API 1.7*/
if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
if (qinq) {
@@ -3217,6 +3367,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
/* If NVM API < 1.7, keep the register setting */
ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
tpid, qinq);
+ i40e_global_cfg_warning(I40E_WARNING_TPID);
return ret;
}
@@ -3446,19 +3597,25 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
}
- /* config the water marker both based on the packets and bytes */
- I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
- (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
- I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
- (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
- I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
- pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT);
- I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
- pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
- << I40E_KILOSHIFT);
+ if (!pf->support_multi_driver) {
+		/* config the watermark based on both packets and bytes */
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
+ (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
+ (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
+ pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+ I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
+ pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+ << I40E_KILOSHIFT);
+ i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
+ } else {
+ PMD_DRV_LOG(ERR,
+ "Water marker configuration is not supported.");
+ }
I40E_WRITE_FLUSH(hw);
@@ -3678,6 +3835,7 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint32_t reg;
int ret;
if (!lut)
@@ -3694,14 +3852,22 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
uint32_t *lut_dw = (uint32_t *)lut;
uint16_t i, lut_size_dw = lut_size / 4;
- for (i = 0; i < lut_size_dw; i++)
- lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
+ if (vsi->type == I40E_VSI_SRIOV) {
+			for (i = 0; i < lut_size_dw; i++) {
+ reg = I40E_VFQF_HLUT1(i, vsi->user_param);
+ lut_dw[i] = i40e_read_rx_ctl(hw, reg);
+ }
+ } else {
+ for (i = 0; i < lut_size_dw; i++)
+ lut_dw[i] = I40E_READ_REG(hw,
+ I40E_PFQF_HLUT(i));
+ }
}
return 0;
}
-static int
+int
i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
struct i40e_pf *pf;
@@ -3725,8 +3891,17 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
uint32_t *lut_dw = (uint32_t *)lut;
uint16_t i, lut_size_dw = lut_size / 4;
- for (i = 0; i < lut_size_dw; i++)
- I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i < lut_size_dw; i++)
+ I40E_WRITE_REG(
+ hw,
+ I40E_VFQF_HLUT1(i, vsi->user_param),
+ lut_dw[i]);
+ } else {
+ for (i = 0; i < lut_size_dw; i++)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
+ lut_dw[i]);
+ }
I40E_WRITE_FLUSH(hw);
}
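The PF/VF split in both LUT paths reduces to which register file is addressed; a sketch of a selector (the helper is hypothetical, the macros are the ones used above):

    static inline uint32_t
    rss_lut_reg(int is_vf_vsi, uint16_t i, uint16_t vf_id)
    {
            /* VF LUT registers are per-VF, indexed by vsi->user_param */
            return is_vf_vsi ? I40E_VFQF_HLUT1(i, vf_id) : I40E_PFQF_HLUT(i);
    }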
@@ -3971,6 +4146,68 @@ i40e_get_cap(struct i40e_hw *hw)
return ret;
}
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
+#define QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
+
+static int i40e_pf_parse_vf_queue_number_handler(const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct i40e_pf *pf;
+ unsigned long num;
+ char *end;
+
+ pf = (struct i40e_pf *)opaque;
+ RTE_SET_USED(key);
+
+ errno = 0;
+ num = strtoul(value, &end, 0);
+ if (errno != 0 || end == value || *end != 0) {
+		PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, "
+			    "keeping current value = %hu", value, pf->vf_nb_qp_max);
+ return -(EINVAL);
+ }
+
+ if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
+ pf->vf_nb_qp_max = (uint16_t)num;
+ else
+ /* here return 0 to make next valid same argument work */
+		PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu; it must be "
+			    "a power of 2 and no greater than 16, "
+			    "keeping current value = %hu", num, pf->vf_nb_qp_max);
+
+ return 0;
+}
+
+static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
+{
+ static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_kvargs *kvlist;
+
+ /* set default queue number per VF as 4 */
+ pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+
+ if (dev->device->devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (kvlist == NULL)
+ return -(EINVAL);
+
+ if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
+		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" given; only "
+			    "the first invalid or the last valid one is used!",
+ QUEUE_NUM_PER_VF_ARG);
+
+ rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
+ i40e_pf_parse_vf_queue_number_handler, pf);
+
+ rte_kvargs_free(kvlist);
+
+ return 0;
+}
+
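The rte_is_power_of_2() test used by the handler reduces to the classic bit trick, so the accepted per-VF queue counts are 1, 2, 4, 8 and 16:

    #include <assert.h>

    static inline int is_pow2(unsigned long n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
            assert(is_pow2(4) && is_pow2(16));
            assert(!is_pow2(0) && !is_pow2(6));
            return 0;
    }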
static int
i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
@@ -3983,6 +4220,9 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
return -EINVAL;
}
+
+ i40e_pf_config_vf_rxq_number(dev);
+
/* Add the parameter init for LFC */
pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
@@ -3992,7 +4232,6 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
pf->max_num_vsi = hw->func_caps.num_vsis;
pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
- pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
/* FDir queue/VSI allocation */
pf->fdir_qp_offset = 0;
@@ -4022,7 +4261,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
pf->flags |= I40E_FLAG_SRIOV;
- pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+ pf->vf_nb_qps = pf->vf_nb_qp_max;
pf->vf_num = pci_dev->max_vfs;
PMD_DRV_LOG(DEBUG,
"%u VF VSIs, %u queues per VF VSI, in total %u queues",
@@ -4950,16 +5189,28 @@ i40e_vsi_setup(struct i40e_pf *pf,
/* VF has MSIX interrupt in VF range, don't allocate here */
if (type == I40E_VSI_MAIN) {
- ret = i40e_res_pool_alloc(&pf->msix_pool,
- RTE_MIN(vsi->nb_qps,
- RTE_MAX_RXTX_INTR_VEC_ID));
- if (ret < 0) {
- PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
- vsi->seid, ret);
- goto fail_queue_alloc;
+ if (pf->support_multi_driver) {
+			/* If multi-driver support is enabled, INT0 is used
+			 * instead of allocating from the MSI-X pool. The MSI-X
+			 * pool is initialized from INT1, so it is safe to set
+			 * msix_intr to 0 and nb_msix to 1 without calling
+			 * i40e_res_pool_alloc.
+ */
+ vsi->msix_intr = 0;
+ vsi->nb_msix = 1;
+ } else {
+ ret = i40e_res_pool_alloc(&pf->msix_pool,
+ RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "VSI MAIN %d get heap failed %d",
+ vsi->seid, ret);
+ goto fail_queue_alloc;
+ }
+ vsi->msix_intr = ret;
+ vsi->nb_msix = RTE_MIN(vsi->nb_qps,
+ RTE_MAX_RXTX_INTR_VEC_ID);
}
- vsi->msix_intr = ret;
- vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
} else if (type != I40E_VSI_SRIOV) {
ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
if (ret < 0) {
@@ -5315,15 +5566,15 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
int mask = 0;
/* Apply vlan offload setting */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+ mask = ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
ret = i40e_vlan_offload_set(dev, mask);
if (ret) {
PMD_DRV_LOG(INFO, "Failed to update vlan offload");
return ret;
}
- /* Apply double-vlan setting, not implemented yet */
-
/* Apply pvid setting */
ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
data->dev_conf.txmode.hw_vlan_insert_pvid);
@@ -5876,7 +6127,8 @@ void
i40e_pf_disable_irq0(struct i40e_hw *hw)
{
/* Disable all interrupt types */
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
}
@@ -5996,7 +6248,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
ret = i40e_dev_link_update(dev, 0);
if (!ret)
_rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
+ RTE_ETH_EVENT_INTR_LSC, NULL);
break;
default:
PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
@@ -6698,17 +6950,20 @@ i40e_pf_disable_rss(struct i40e_pf *pf)
I40E_WRITE_FLUSH(hw);
}
-static int
+int
i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
{
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
+ I40E_VFQF_HKEY_MAX_INDEX :
+ I40E_PFQF_HKEY_MAX_INDEX;
int ret = 0;
if (!key || key_len == 0) {
PMD_DRV_LOG(DEBUG, "No key to be configured");
return 0;
- } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ } else if (key_len != (key_idx + 1) *
sizeof(uint32_t)) {
PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
return -EINVAL;
@@ -6725,8 +6980,18 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
uint32_t *hash_key = (uint32_t *)key;
uint16_t i;
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
+ if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ I40E_WRITE_REG(
+ hw,
+ I40E_VFQF_HKEY1(i, vsi->user_param),
+ hash_key[i]);
+
+ } else {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
+ hash_key[i]);
+ }
I40E_WRITE_FLUSH(hw);
}
@@ -6738,6 +7003,7 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
{
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ uint32_t reg;
int ret;
if (!key || !key_len)
@@ -6754,11 +7020,22 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
uint32_t *key_dw = (uint32_t *)key;
uint16_t i;
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
+ if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
+ reg = I40E_VFQF_HKEY1(i, vsi->user_param);
+ key_dw[i] = i40e_read_rx_ctl(hw, reg);
+ }
+ *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ } else {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+ reg = I40E_PFQF_HKEY(i);
+ key_dw[i] = i40e_read_rx_ctl(hw, reg);
+ }
+ *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
}
- *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
-
return 0;
}
@@ -6951,7 +7228,7 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
uint8_t add)
{
uint16_t ip_type;
- uint32_t ipv4_addr;
+ uint32_t ipv4_addr, ipv4_addr_le;
uint8_t i, tun_type = 0;
	/* internal variable to convert ipv6 byte order */
uint32_t convert_ipv6[4];
@@ -6984,8 +7261,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+ ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
rte_memcpy(&pfilter->element.ipaddr.v4.data,
- &rte_cpu_to_le_32(ipv4_addr),
+ &ipv4_addr_le,
sizeof(pfilter->element.ipaddr.v4.data));
} else {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
@@ -7036,11 +7314,13 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
if (add && node) {
PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ rte_free(cld_filter);
return -EINVAL;
}
if (!add && !node) {
PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ rte_free(cld_filter);
return -EINVAL;
}
@@ -7049,16 +7329,26 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
vsi->seid, &cld_filter->element, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ rte_free(cld_filter);
return -ENOTSUP;
}
tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ if (tunnel == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ rte_free(cld_filter);
+ return -ENOMEM;
+ }
+
rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ if (ret < 0)
+ rte_free(tunnel);
} else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
&cld_filter->element, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ rte_free(cld_filter);
return -ENOTSUP;
}
ret = i40e_sw_tunnel_filter_del(pf, &node->input);
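/*
 * Editor's note: every early return in this function now frees the
 * cld_filter buffer allocated at entry; before this change each error
 * path leaked it. A hedged, self-contained sketch of the pattern
 * (example_op and fail are hypothetical names, not from the patch):
 *
 *     static int
 *     example_op(bool fail)
 *     {
 *         void *buf = rte_zmalloc("example", 64, 0);
 *
 *         if (buf == NULL)
 *             return -ENOMEM;
 *         if (fail) {
 *             rte_free(buf);   // was leaked before this fix
 *             return -EINVAL;
 *         }
 *         rte_free(buf);
 *         return 0;
 *     }
 */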
@@ -7084,6 +7374,11 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
enum i40e_status_code status = I40E_SUCCESS;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
memset(&filter_replace, 0,
sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
memset(&filter_replace_buf, 0,
@@ -7120,6 +7415,13 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
+ if (!status) {
+ i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
+ PMD_DRV_LOG(DEBUG, "Global configuration modification: "
+ "cloud l1 type is changed from 0x%x to 0x%x",
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+ }
return status;
}
@@ -7131,6 +7433,11 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
enum i40e_status_code status = I40E_SUCCESS;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
/* For MPLSoUDP */
memset(&filter_replace, 0,
sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
@@ -7152,6 +7459,10 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
+ PMD_DRV_LOG(DEBUG, "Global configuration modification: "
+ "cloud filter type is changed from 0x%x to 0x%x",
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* For MPLSoGRE */
memset(&filter_replace, 0,
@@ -7174,6 +7485,13 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
+ if (!status) {
+ i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
+ PMD_DRV_LOG(DEBUG, "Global configuration modification: "
+ "cloud filter type is changed from 0x%x to 0x%x",
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+ }
return status;
}
@@ -7185,6 +7503,11 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
enum i40e_status_code status = I40E_SUCCESS;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
/* For GTP-C */
memset(&filter_replace, 0,
sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
@@ -7213,6 +7536,10 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
+ PMD_DRV_LOG(DEBUG, "Global configuration modification: "
+ "cloud l1 type is changed from 0x%x to 0x%x",
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* for GTP-U */
memset(&filter_replace, 0,
@@ -7241,6 +7568,13 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
+ if (!status) {
+ i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
+ PMD_DRV_LOG(DEBUG, "Global configuration modification: "
+ "cloud l1 type is changed from 0x%x to 0x%x",
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+ }
return status;
}
@@ -7252,6 +7586,11 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
enum i40e_status_code status = I40E_SUCCESS;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
+ return I40E_NOT_SUPPORTED;
+ }
+
/* for GTP-C */
memset(&filter_replace, 0,
sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
@@ -7272,6 +7611,10 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
+ PMD_DRV_LOG(DEBUG, "Global configuration modification: "
+ "cloud filter type is changed from 0x%x to 0x%x",
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* for GTP-U */
memset(&filter_replace, 0,
@@ -7293,6 +7636,13 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
+ if (!status) {
+ i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
+ PMD_DRV_LOG(DEBUG, "Global configuration modification: "
+ "cloud filter type is changed from 0x%x to 0x%x",
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+ }
return status;
}
@@ -7302,7 +7652,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
uint8_t add)
{
uint16_t ip_type;
- uint32_t ipv4_addr;
+ uint32_t ipv4_addr, ipv4_addr_le;
uint8_t i, tun_type = 0;
/* internal variable to convert ipv6 byte order */
uint32_t convert_ipv6[4];
@@ -7338,8 +7688,9 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+ ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
rte_memcpy(&pfilter->element.ipaddr.v4.data,
- &rte_cpu_to_le_32(ipv4_addr),
+ &ipv4_addr_le,
sizeof(pfilter->element.ipaddr.v4.data));
} else {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
@@ -7486,6 +7837,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
else {
if (tunnel_filter->vf_id >= pf->vf_num) {
PMD_DRV_LOG(ERR, "Invalid argument.");
+ rte_free(cld_filter);
return -EINVAL;
}
vf = &pf->vfs[tunnel_filter->vf_id];
@@ -7500,11 +7852,13 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
if (add && node) {
PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ rte_free(cld_filter);
return -EINVAL;
}
if (!add && !node) {
PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ rte_free(cld_filter);
return -EINVAL;
}
@@ -7517,11 +7871,20 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
vsi->seid, &cld_filter->element, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ rte_free(cld_filter);
return -ENOTSUP;
}
tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ if (tunnel == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ rte_free(cld_filter);
+ return -ENOMEM;
+ }
+
rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ if (ret < 0)
+ rte_free(tunnel);
} else {
if (big_buffer)
ret = i40e_aq_remove_cloud_filters_big_buffer(
@@ -7531,6 +7894,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
&cld_filter->element, 1);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ rte_free(cld_filter);
return -ENOTSUP;
}
ret = i40e_sw_tunnel_filter_del(pf, &node->input);
@@ -7808,9 +8172,15 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf,
static int
i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
{
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
uint32_t val, reg;
int ret = -EINVAL;
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
+ return -ENOTSUP;
+ }
+
val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
@@ -7828,6 +8198,10 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
reg, NULL);
if (ret != 0)
return ret;
+ PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
+ "with value 0x%08x",
+ I40E_GL_PRS_FVBM(2), reg);
+ i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
} else {
ret = 0;
}
@@ -7984,14 +8358,17 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw,
(reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
/*
- * We work only with lowest 32 bits which is not correct, but to work
- * properly the valid_bit_mask size should be increased up to 64 bits
- * and this will brake ABI. This modification will be done in next
- * release
+ * As i40e supports less than 64 flow types, only first 64 bits need to
+ * be checked.
*/
- g_cfg->valid_bit_mask[0] = (uint32_t)adapter->flow_types_mask;
+ for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
+ g_cfg->valid_bit_mask[i] = 0ULL;
+ g_cfg->sym_hash_enable_mask[i] = 0ULL;
+ }
+
+ g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT32_BIT; i++) {
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
if (!adapter->pctypes_tbl[i])
continue;
for (j = I40E_FILTER_PCTYPE_INVALID + 1;
@@ -8000,7 +8377,7 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw,
reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
g_cfg->sym_hash_enable_mask[0] |=
- (1UL << i);
+ (1ULL << i);
}
}
}
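/*
 * Editor's note on the 1UL -> 1ULL change: flow types may now sit
 * above bit 31, and on targets where unsigned long is 32 bits,
 * shifting 1UL past bit 31 is undefined behaviour. A self-contained
 * illustration:
 *
 *     uint64_t mask = 0;
 *     unsigned int ft = 40;        // a flow type beyond bit 31
 *
 *     mask |= (1ULL << ft);        // well-defined on all targets
 *     // mask |= (1UL << ft);      // UB where sizeof(long) == 4
 */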
@@ -8014,7 +8391,7 @@ i40e_hash_global_config_check(const struct i40e_adapter *adapter,
const struct rte_eth_hash_global_conf *g_cfg)
{
uint32_t i;
- uint32_t mask0, i40e_mask = adapter->flow_types_mask;
+ uint64_t mask0, i40e_mask = adapter->flow_types_mask;
if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
@@ -8025,7 +8402,7 @@ i40e_hash_global_config_check(const struct i40e_adapter *adapter,
}
/*
- * As i40e supports less than 32 flow types, only first 32 bits need to
+ * As i40e supports less than 64 flow types, only first 64 bits need to
* be checked.
*/
mask0 = g_cfg->valid_bit_mask[0];
@@ -8058,35 +8435,39 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
struct rte_eth_hash_global_conf *g_cfg)
{
struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
+ struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
int ret;
uint16_t i, j;
uint32_t reg;
- /*
- * We work only with lowest 32 bits which is not correct, but to work
- * properly the valid_bit_mask size should be increased up to 64 bits
- * and this will brake ABI. This modification will be done in next
- * release
- */
- uint32_t mask0 = g_cfg->valid_bit_mask[0] &
- (uint32_t)adapter->flow_types_mask;
+ uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
+
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
+ return -ENOTSUP;
+ }
/* Check the input parameters */
ret = i40e_hash_global_config_check(adapter, g_cfg);
if (ret < 0)
return ret;
- for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT32_BIT; i++) {
+ /*
+ * As i40e supports less than 64 flow types, only first 64 bits need to
+ * be configured.
+ */
+ for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
 		if (mask0 & (1ULL << i)) {
- reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
+ reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
for (j = I40E_FILTER_PCTYPE_INVALID + 1;
j < I40E_FILTER_PCTYPE_MAX; j++) {
if (adapter->pctypes_tbl[i] & (1ULL << j))
- i40e_write_rx_ctl(hw,
+ i40e_write_global_rx_ctl(hw,
I40E_GLQF_HSYM(j),
reg);
}
+ i40e_global_cfg_warning(I40E_WARNING_HSYM);
}
}
@@ -8111,7 +8492,8 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
/* Use the default, and keep it as it is */
goto out;
- i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
out:
I40E_WRITE_FLUSH(hw);
@@ -8700,6 +9082,18 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
(uint32_t)i40e_read_rx_ctl(hw, addr));
}
+void
+i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
+{
+ uint32_t reg = i40e_read_rx_ctl(hw, addr);
+
+ PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
+ if (reg != val)
+ i40e_write_global_rx_ctl(hw, addr, val);
+ PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
+ (uint32_t)i40e_read_rx_ctl(hw, addr));
+}
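+/*
+ * Hedged usage note: like i40e_check_write_reg(), this helper writes
+ * only when the value actually changes, which matters more here
+ * because global registers are shared across all PFs and any other
+ * driver bound to the device. Typical call shape, as in the hunks
+ * below:
+ *
+ *     i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ *                                 (uint32_t)(inset_reg & UINT32_MAX));
+ */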
+
static void
i40e_filter_input_set_init(struct i40e_pf *pf)
{
@@ -8723,6 +9117,10 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
I40E_INSET_MASK_NUM_REG);
if (num < 0)
return;
+ if (pf->support_multi_driver && num > 0) {
+ PMD_DRV_LOG(ERR, "Input set setting is not supported.");
+ return;
+ }
inset_reg = i40e_translate_input_set_reg(hw->mac.type,
input_set);
@@ -8731,31 +9129,48 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
- (uint32_t)(inset_reg & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
- (uint32_t)((inset_reg >>
- I40E_32_BIT_WIDTH) & UINT32_MAX));
-
- for (i = 0; i < num; i++) {
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- mask_reg[i]);
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- mask_reg[i]);
- }
- /*clear unused mask registers of the pctype */
- for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- 0);
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- 0);
+ if (!pf->support_multi_driver) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+ for (i = 0; i < num; i++) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
+ }
+			/* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ 0);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ 0);
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "Input set setting is not supported.");
}
I40E_WRITE_FLUSH(hw);
/* store the default input set */
- pf->hash_input_set[pctype] = input_set;
+ if (!pf->support_multi_driver)
+ pf->hash_input_set[pctype] = input_set;
pf->fdir.input_set[pctype] = input_set;
}
+
+ if (!pf->support_multi_driver) {
+ i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
+ i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
+ i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
+ }
}
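/*
 * Editor's summary of the multi-driver split above (hedged):
 *
 *     per-PF registers (always written):
 *         I40E_PRTQF_FD_INSET(pctype, 0..1)
 *     global registers (skipped when support_multi_driver is set):
 *         I40E_GLQF_HASH_INSET(0..1, pctype)
 *         I40E_GLQF_FD_MSK(i, pctype)
 *         I40E_GLQF_HASH_MSK(i, pctype)
 */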
int
@@ -8778,6 +9193,11 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
return -EINVAL;
}
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
+ return -ENOTSUP;
+ }
+
pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
if (pctype == I40E_FILTER_PCTYPE_INVALID) {
PMD_DRV_LOG(ERR, "invalid flow_type input.");
@@ -8811,19 +9231,21 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
- (uint32_t)(inset_reg & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
- (uint32_t)((inset_reg >>
- I40E_32_BIT_WIDTH) & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+ i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
for (i = 0; i < num; i++)
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- mask_reg[i]);
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
	/* clear unused mask registers of the pctype */
for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- 0);
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ 0);
+ i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
I40E_WRITE_FLUSH(hw);
pf->hash_input_set[pctype] = input_set;
@@ -8881,6 +9303,10 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
I40E_INSET_MASK_NUM_REG);
if (num < 0)
return -EINVAL;
+ if (pf->support_multi_driver && num > 0) {
+ PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+ return -ENOTSUP;
+ }
inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
@@ -8890,13 +9316,20 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
- for (i = 0; i < num; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- mask_reg[i]);
- /*clear unused mask registers of the pctype */
- for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- 0);
+ if (!pf->support_multi_driver) {
+ for (i = 0; i < num; i++)
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+		/* clear unused mask registers of the pctype */
+ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ 0);
+ i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
+ } else {
+ PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+ }
I40E_WRITE_FLUSH(hw);
pf->fdir.input_set[pctype] = input_set;
@@ -9142,9 +9575,16 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
if (add) {
ethertype_filter = rte_zmalloc("ethertype_filter",
sizeof(*ethertype_filter), 0);
+ if (ethertype_filter == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+
rte_memcpy(ethertype_filter, &check_filter,
sizeof(check_filter));
ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ if (ret < 0)
+ rte_free(ethertype_filter);
} else {
ret = i40e_sw_ethertype_filter_del(pf, &node->input);
}
@@ -10679,27 +11119,21 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
uint16_t msix_intr;
msix_intr = intr_handle->intr_vec[queue_id];
if (msix_intr == I40E_MISC_VEC_ID)
I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
else
I40E_WRITE_REG(hw,
I40E_PFINT_DYN_CTLN(msix_intr -
I40E_RX_VEC_START),
I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (interval <<
- I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
rte_intr_enable(&pci_dev->intr_handle);
@@ -10717,12 +11151,13 @@ i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
msix_intr = intr_handle->intr_vec[queue_id];
if (msix_intr == I40E_MISC_VEC_ID)
- I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+ I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
else
I40E_WRITE_REG(hw,
I40E_PFINT_DYN_CTLN(msix_intr -
I40E_RX_VEC_START),
- 0);
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
I40E_WRITE_FLUSH(hw);
return 0;
@@ -10818,14 +11253,43 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_mac_filter_info mac_filter;
+ struct i40e_mac_filter *f;
+ int ret;
if (!is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
return;
}
- /* Flags: 0x3 updates port address */
- i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
+ break;
+ }
+
+ if (f == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
+ return;
+ }
+
+ mac_filter = f->mac_info;
+ ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to delete mac filter");
+ return;
+ }
+ memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
+ ret = i40e_vsi_add_mac(vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add mac filter");
+ return;
+ }
+ memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
+
+ i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ mac_addr->addr_bytes, NULL);
}
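/*
 * Editor's note: the bare firmware LAA write is replaced by a
 * four-step sequence -- find the filter for the old default MAC,
 * delete it, add a filter for the new address, then record it in
 * pf->dev_addr -- so the software MAC filter list stays consistent
 * with what the admin queue programs. Hedged call-order sketch:
 *
 *     i40e_vsi_delete_mac(vsi, &old_addr);
 *     i40e_vsi_add_mac(vsi, &new_filter);
 *     i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
 *                               new_addr.addr_bytes, NULL);
 */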
static int
@@ -10943,12 +11407,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
}
}
+/* Restore rss filter */
+static inline void
+i40e_rss_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_rte_flow_rss_conf *conf =
+ &pf->rss_info;
+ if (conf->num)
+ i40e_config_rss_filter(pf, conf, TRUE);
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
i40e_fdir_filter_restore(pf);
+ i40e_rss_filter_restore(pf);
}
static bool
@@ -11078,7 +11553,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
uint8_t proto_id;
char name[RTE_PMD_I40E_DDP_NAME_SIZE];
uint32_t i, j, n;
- bool inner_ip;
+ bool in_tunnel;
int ret;
/* get information about new ptype num */
@@ -11123,7 +11598,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
for (i = 0; i < ptype_num; i++) {
ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
ptype_mapping[i].sw_ptype = 0;
- inner_ip = false;
+ in_tunnel = false;
for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
proto_id = ptype[i].protocols[j];
if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
@@ -11133,54 +11608,108 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
continue;
memset(name, 0, sizeof(name));
strcpy(name, proto[n].name);
- if (!strncmp(name, "IPV4", 4) && !inner_ip) {
+ if (!strncasecmp(name, "PPPOE", 5))
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L2_ETHER_PPPOE;
+ else if (!strncasecmp(name, "IPV4FRAG", 8) &&
+ !in_tunnel) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
- inner_ip = true;
- } else if (!strncmp(name, "IPV4FRAG", 8) &&
- inner_ip) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_FRAG;
+ } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
+ in_tunnel) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L4_FRAG;
- } else if (!strncmp(name, "IPV4", 4) &&
- inner_ip)
+ } else if (!strncasecmp(name, "OIPV4", 5)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "IPV4", 4) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (!strncasecmp(name, "IPV4", 4) &&
+ in_tunnel)
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
- else if (!strncmp(name, "IPV6", 4) &&
- !inner_ip) {
+ else if (!strncasecmp(name, "IPV6FRAG", 8) &&
+ !in_tunnel) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
- inner_ip = true;
- } else if (!strncmp(name, "IPV6FRAG", 8) &&
- inner_ip) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_FRAG;
+ } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
+ in_tunnel) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L4_FRAG;
- } else if (!strncmp(name, "IPV6", 4) &&
- inner_ip)
+ } else if (!strncasecmp(name, "OIPV6", 5)) {
ptype_mapping[i].sw_ptype |=
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
- else if (!strncmp(name, "GTPC", 4))
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "IPV6", 4) &&
+ !in_tunnel)
ptype_mapping[i].sw_ptype |=
- RTE_PTYPE_TUNNEL_GTPC;
- else if (!strncmp(name, "GTPU", 4))
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ else if (!strncasecmp(name, "IPV6", 4) &&
+ in_tunnel)
ptype_mapping[i].sw_ptype |=
- RTE_PTYPE_TUNNEL_GTPU;
- else if (!strncmp(name, "UDP", 3))
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ else if (!strncasecmp(name, "UDP", 3) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_UDP;
+ else if (!strncasecmp(name, "UDP", 3) &&
+ in_tunnel)
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L4_UDP;
- else if (!strncmp(name, "TCP", 3))
+ else if (!strncasecmp(name, "TCP", 3) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_TCP;
+ else if (!strncasecmp(name, "TCP", 3) &&
+ in_tunnel)
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L4_TCP;
- else if (!strncmp(name, "SCTP", 4))
+ else if (!strncasecmp(name, "SCTP", 4) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_SCTP;
+ else if (!strncasecmp(name, "SCTP", 4) &&
+ in_tunnel)
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L4_SCTP;
- else if (!strncmp(name, "ICMP", 4) ||
- !strncmp(name, "ICMPV6", 6))
+ else if ((!strncasecmp(name, "ICMP", 4) ||
+ !strncasecmp(name, "ICMPV6", 6)) &&
+ !in_tunnel)
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_L4_ICMP;
+ else if ((!strncasecmp(name, "ICMP", 4) ||
+ !strncasecmp(name, "ICMPV6", 6)) &&
+ in_tunnel)
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_INNER_L4_ICMP;
+ else if (!strncasecmp(name, "GTPC", 4)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_GTPC;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "GTPU", 4)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_GTPU;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "GRENAT", 6)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_GRENAT;
+ in_tunnel = true;
+ } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
+ ptype_mapping[i].sw_ptype |=
+ RTE_PTYPE_TUNNEL_L2TP;
+ in_tunnel = true;
+ }
break;
}
@@ -11312,6 +11841,11 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
+ return ret;
+ }
+
/* Init */
memset(&filter_replace, 0,
sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
@@ -11342,6 +11876,10 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
&filter_replace_buf);
if (ret != I40E_SUCCESS)
return ret;
+ PMD_DRV_LOG(DEBUG, "Global configuration modification: "
+ "cloud l1 type is changed from 0x%x to 0x%x",
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* Apply the second L2 cloud filter */
memset(&filter_replace, 0,
@@ -11363,17 +11901,104 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
+ if (!ret) {
+ i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
+ PMD_DRV_LOG(DEBUG, "Global configuration modification: "
+ "cloud filter type is changed from 0x%x to 0x%x",
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
+ }
return ret;
}
+int
+i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t i, lut = 0;
+ uint16_t j, num;
+ struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+
+ if (!add) {
+ if (memcmp(conf, rss_info,
+ sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
+ i40e_pf_disable_rss(pf);
+ memset(rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (rss_info->num)
+ return -EINVAL;
+
+ /* If both VMDQ and RSS enabled, not all of PF queues are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
+
+ num = RTE_MIN(num, conf->num);
+ PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
+ return -ENOTSUP;
+ }
+
+ /* Fill in redirection table */
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
+ i40e_pf_disable_rss(pf);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Random default keys */
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+
+ i40e_hw_rss_hash_set(pf, &rss_conf);
+
+ rte_memcpy(rss_info,
+ conf, sizeof(struct i40e_rte_flow_rss_conf));
+
+ return 0;
+}
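+/*
+ * Editor's note on the LUT fill loop above: four 8-bit queue indices
+ * are packed per 32-bit HLUT register, and the write fires on every
+ * fourth entry ((i & 3) == 3). Hedged worked example with
+ * conf->queue = {0, 1}:
+ *
+ *     i=0: lut = 0x00000000      i=2: lut = 0x00000100
+ *     i=1: lut = 0x00000001      i=3: lut = 0x00010001  -> HLUT(0)
+ *
+ * so the first entry of each group lands in the most-significant byte
+ * of its register.
+ */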
+
RTE_INIT(i40e_init_log);
static void
i40e_init_log(void)
{
- i40e_logtype_init = rte_log_register("pmd.i40e.init");
+ i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
if (i40e_logtype_init >= 0)
rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
- i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
+ i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
if (i40e_logtype_driver >= 0)
rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
}
+
+RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
+ QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
+ ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
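+/*
+ * Hedged usage example: assuming the two macros expand to the devarg
+ * names "queue-num-per-vf" and "support-multi-driver", multi-driver
+ * mode is enabled per device on the EAL command line, e.g.:
+ *
+ *     testpmd -w 0000:82:00.0,support-multi-driver=1 -- -i
+ */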
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index cd67453d..99efb670 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#ifndef _I40E_ETHDEV_H_
@@ -61,7 +32,8 @@
#define I40E_NUM_MACADDR_MAX 64
/* Maximum number of VFs */
#define I40E_MAX_VF 128
-
+/* flag of no loopback */
+#define I40E_AQ_LB_MODE_NONE 0x0
/*
* vlan_id is a 12 bit number.
* The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
@@ -106,6 +78,15 @@
(((vf)->version_major == VIRTCHNL_VERSION_MAJOR) && \
((vf)->version_minor == 1))
+#define I40E_WRITE_GLB_REG(hw, reg, value) \
+ do { \
+ I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), \
+ (reg)), (value)); \
+ PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified " \
+ "with value 0x%08x", \
+ (reg), (value)); \
+ } while (0)
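+/*
+ * Hedged usage sketch: unlike a silent I40E_WRITE_REG(), this macro
+ * leaves a DEBUG trace for every global-register write, e.g.
+ * (flx_ort and layer_idx as in the FDIR flex-payload hunks):
+ *
+ *     I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+ */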
+
/* index flex payload per layer */
enum i40e_flxpld_layer_idx {
I40E_FLXPLD_L2_IDX = 0,
@@ -189,6 +170,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_ITR_INDEX_NONE 3
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 8160 /* 8160 us */
/* Special FW support this floating VEB feature */
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
@@ -353,7 +335,7 @@ struct i40e_vsi {
* needs to add, HW needs to know the layout that VSIs are organized.
 * Besides that, VSI is an element and can't switch packets, which needs
* to add new component VEB to perform switching. So, a new VSI needs
- * to specify the the uplink VSI (Parent VSI) before created. The
+ * to specify the uplink VSI (Parent VSI) before created. The
* uplink VSI will check whether it had a VEB to switch packets. If no,
* it will try to create one. Then, uplink VSI will move the new VSI
* into its' sib_vsi_list to manage all the downlink VSI.
@@ -426,6 +408,9 @@ struct i40e_pf_vf {
uint16_t lan_nb_qps; /* Actual queues allocated */
uint16_t reset_cnt; /* Total vf reset times */
struct ether_addr mac_addr; /* Default MAC address */
+ /* version of the virtchnl from VF */
+ struct virtchnl_version_info version;
+ uint32_t request_caps; /* offload caps requested from VF */
};
/*
@@ -891,6 +876,13 @@ struct i40e_customized_pctype {
bool valid; /* Check if it's valid */
};
+struct i40e_rte_flow_rss_conf {
+ struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+ uint16_t queue_region_conf; /**< Queue region config flag */
+ uint16_t num; /**< Number of entries in queue[]. */
+ uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
+};
+
/*
* Structure to store private data specific for PF instance.
*/
@@ -945,6 +937,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
+ struct i40e_rte_flow_rss_conf rss_info; /* rss info */
struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
@@ -957,6 +950,7 @@ struct i40e_pf {
bool gtp_replace_flag; /* 1 - GTP-C/U filter replace is done */
bool qinq_replace_flag; /* QINQ filter replace is done */
struct i40e_tm_conf tm_conf;
+	bool support_multi_driver; /* 1 - support multiple drivers */
/* Dynamic Device Personalization */
bool gtp_support; /* 1 - support GTP-C and GTP-U */
@@ -1071,6 +1065,7 @@ union i40e_filter_t {
struct i40e_fdir_filter_conf fdir_filter;
struct rte_eth_tunnel_filter_conf tunnel_filter;
struct i40e_tunnel_filter_conf consistent_tunnel_filter;
+ struct i40e_rte_flow_rss_conf rss_conf;
};
typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
@@ -1084,6 +1079,22 @@ struct i40e_valid_pattern {
parse_filter_t parse_filter;
};
+enum I40E_WARNING_IDX {
+ I40E_WARNING_DIS_FLX_PLD,
+ I40E_WARNING_ENA_FLX_PLD,
+ I40E_WARNING_QINQ_PARSER,
+ I40E_WARNING_QINQ_CLOUD_FILTER,
+ I40E_WARNING_TPID,
+ I40E_WARNING_FLOW_CTL,
+ I40E_WARNING_GRE_KEY_LEN,
+ I40E_WARNING_QF_CTL,
+ I40E_WARNING_HASH_INSET,
+ I40E_WARNING_HSYM,
+ I40E_WARNING_HASH_MSK,
+ I40E_WARNING_FD_MSK,
+ I40E_WARNING_RPL_CLD_FILTER,
+};
+
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
@@ -1186,6 +1197,8 @@ int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask,
uint8_t nb_elem);
uint64_t i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input);
void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val);
+void i40e_check_write_global_reg(struct i40e_hw *hw,
+ uint32_t addr, uint32_t val);
int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops);
void i40e_tm_conf_init(struct rte_eth_dev *dev);
@@ -1198,6 +1211,11 @@ int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
+void i40e_flex_payload_reg_set_default(struct i40e_hw *hw);
+int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len);
+int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size);
+int i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
@@ -1274,15 +1292,50 @@ i40e_align_floor(int n)
}
static inline uint16_t
-i40e_calc_itr_interval(int16_t interval)
+i40e_calc_itr_interval(int16_t interval, bool is_pf, bool is_multi_drv)
{
- if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
- interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+ if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) {
+ if (is_multi_drv) {
+ interval = I40E_QUEUE_ITR_INTERVAL_MAX;
+ } else {
+ if (is_pf)
+ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+ else
+ interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT;
+ }
+ }
/* Convert to hardware count, as writing each 1 represents 2 us */
return interval / 2;
}
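/*
 * Editor's worked example: each hardware count represents 2 us, so
 * the in-range PF default of 32 us gives
 * i40e_calc_itr_interval(32, true, false) == 16; an out-of-range VF
 * request falls back to 8160 us (returned as 4080); and multi-driver
 * mode pins the maximum regardless of is_pf.
 */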
+static inline void
+i40e_global_cfg_warning(enum I40E_WARNING_IDX idx)
+{
+ const char *warning;
+ static const char *const warning_list[] = {
+ [I40E_WARNING_DIS_FLX_PLD] = "disable FDIR flexible payload",
+ [I40E_WARNING_ENA_FLX_PLD] = "enable FDIR flexible payload",
+ [I40E_WARNING_QINQ_PARSER] = "support QinQ parser",
+ [I40E_WARNING_QINQ_CLOUD_FILTER] = "support QinQ cloud filter",
+ [I40E_WARNING_TPID] = "support TPID configuration",
+ [I40E_WARNING_FLOW_CTL] = "configure water marker",
+ [I40E_WARNING_GRE_KEY_LEN] = "support GRE key length setting",
+ [I40E_WARNING_QF_CTL] = "support hash function setting",
+ [I40E_WARNING_HASH_INSET] = "configure hash input set",
+ [I40E_WARNING_HSYM] = "set symmetric hash",
+ [I40E_WARNING_HASH_MSK] = "configure hash mask",
+ [I40E_WARNING_FD_MSK] = "configure fdir mask",
+ [I40E_WARNING_RPL_CLD_FILTER] = "replace cloud filter",
+ };
+
+ warning = warning_list[idx];
+
+ RTE_LOG(WARNING, PMD,
+ "Global register is changed during %s\n",
+ warning);
+}
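+/*
+ * Pairing note: each successful global-register change in the .c
+ * hunks above calls this helper right after the write, e.g.:
+ *
+ *     i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
+ *     i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
+ */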
+
#define I40E_VALID_FLOW(flow_type) \
((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
(flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 91b5bb03..fd003fe0 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <sys/queue.h>
@@ -54,7 +25,7 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -945,14 +916,16 @@ i40evf_update_stats(struct i40e_vsi *vsi,
static void
i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
{
+ int ret;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_eth_stats *pstats = NULL;
/* read stat values to clear hardware registers */
- i40evf_query_stats(dev, &pstats);
+ ret = i40evf_query_stats(dev, &pstats);
/* set stats offset base on current values */
- vf->vsi.eth_stats_offset = *pstats;
+ if (ret == 0)
+ vf->vsi.eth_stats_offset = *pstats;
}
static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
@@ -1165,7 +1138,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
vf->dev_data = dev->data;
@@ -1308,7 +1281,7 @@ i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg,
case VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
- NULL, NULL);
+ NULL);
break;
case VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
@@ -1585,13 +1558,19 @@ static int
i40evf_init_vlan(struct rte_eth_dev *dev)
{
/* Apply vlan offload setting */
- return i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+ i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
+
+ return 0;
}
static int
i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ return -ENOTSUP;
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
@@ -1862,7 +1841,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+ i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
uint16_t msix_intr;
msix_intr = intr_handle->intr_vec[queue_id];
@@ -1997,7 +1976,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
dev->data->nb_tx_queues);
/* check and configure queue intr-vector mapping */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
intr_vector = dev->data->nb_rx_queues;
if (rte_intr_efd_enable(intr_handle, intr_vector))
return -1;
@@ -2214,14 +2194,22 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_QINQ_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM;
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -2675,19 +2663,19 @@ i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
return;
}
- if (is_same_ether_addr(mac_addr, dev->data->mac_addrs))
- return;
-
if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
return;
- i40evf_del_mac_addr_by_addr(dev, dev->data->mac_addrs);
+ i40evf_del_mac_addr_by_addr(dev, (struct ether_addr *)hw->mac.addr);
i40evf_add_mac_addr(dev, mac_addr, 0, 0);
+
+ ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
}
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 3d7170d5..b83a0cff 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <sys/queue.h>
@@ -40,7 +11,7 @@
#include <stdarg.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
@@ -95,17 +66,17 @@
#define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
#define I40E_FDIR_FLOWS ( \
- (1 << RTE_ETH_FLOW_FRAG_IPV4) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
- (1 << RTE_ETH_FLOW_FRAG_IPV6) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
- (1 << RTE_ETH_FLOW_L2_PAYLOAD))
+ (1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+ (1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
+ (1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
@@ -168,7 +139,6 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
rte_wmb();
	/* Init the RX tail register. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
return err;
@@ -534,7 +504,7 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf,
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
- uint32_t flx_pit;
+ uint32_t flx_pit, flx_ort;
uint16_t num, min_next_off; /* in words */
uint8_t field_idx = 0;
uint8_t layer_idx = 0;
@@ -548,9 +518,18 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf,
layer_idx = I40E_FLXPLD_L4_IDX;
memset(flex_pit, 0, sizeof(flex_pit));
- num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);
+ num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
+ RTE_DIM(flex_pit));
- for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) {
+ if (num) {
+ flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
+ (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
+ (layer_idx * I40E_MAX_FLXPLD_FIED);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+ i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
+ }
+
+ for (i = 0; i < num; i++) {
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
/* record the info in fdir structure */
pf->fdir.flex_set[field_idx].src_offset =
@@ -670,22 +649,31 @@ i40e_fdir_configure(struct rte_eth_dev *dev)
PMD_DRV_LOG(ERR, " invalid configuration arguments.");
return -EINVAL;
}
- /* configure flex payload */
- for (i = 0; i < conf->nb_payloads; i++)
- i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
- /* configure flex mask*/
- for (i = 0; i < conf->nb_flexmasks; i++) {
- if (hw->mac.type == I40E_MAC_X722) {
- /* get translated pctype value in fd pctype register */
- pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
- hw, I40E_GLQF_FD_PCTYPES(
- (int)i40e_flowtype_to_pctype(pf->adapter,
- conf->flex_mask[i].flow_type)));
- } else
- pctype = i40e_flowtype_to_pctype(pf->adapter,
- conf->flex_mask[i].flow_type);
- i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
+ if (!pf->support_multi_driver) {
+ /* configure flex payload */
+ for (i = 0; i < conf->nb_payloads; i++)
+ i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
+		/* configure flex mask */
+ for (i = 0; i < conf->nb_flexmasks; i++) {
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)
+ i40e_read_rx_ctl(hw,
+ I40E_GLQF_FD_PCTYPES(
+ (int)i40e_flowtype_to_pctype(
+ pf->adapter,
+ conf->flex_mask[i].flow_type)));
+ } else {
+ pctype = i40e_flowtype_to_pctype(pf->adapter,
+ conf->flex_mask[i].flow_type);
+ }
+
+ i40e_set_flex_mask_on_pctype(pf, pctype,
+ &conf->flex_mask[i]);
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "Not support flexible payload.");
}
return ret;
@@ -1363,13 +1351,18 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
PMD_DRV_LOG(ERR, "invalid programming status"
" reported, error = %u.", error);
} else
- PMD_DRV_LOG(ERR, "unknown programming status"
+ PMD_DRV_LOG(INFO, "unknown programming status"
" reported, len = %d, id = %u.", len, id);
rxdp->wb.qword1.status_error_len = 0;
rxq->rx_tail++;
if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
rxq->rx_tail = 0;
+ if (rxq->rx_tail == 0)
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ else
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
}
+
return ret;
}
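/*
 * Editor's note: the tail register is now advanced as programming
 * statuses are consumed instead of only at queue init; the hardware
 * tail must trail the next software slot by one, hence rx_tail - 1,
 * wrapping to nb_rx_desc - 1 when rx_tail is 0. Hedged sketch:
 *
 *     uint16_t tail = rxq->rx_tail ? rxq->rx_tail - 1
 *                                  : rxq->nb_rx_desc - 1;
 *     I40E_PCI_REG_WRITE(rxq->qrx_tail, tail);
 */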
@@ -1612,8 +1605,15 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
if (add) {
fdir_filter = rte_zmalloc("fdir_filter",
sizeof(*fdir_filter), 0);
+ if (fdir_filter == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+
rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ if (ret < 0)
+ rte_free(fdir_filter);
} else {
ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
}
@@ -2020,6 +2020,7 @@ i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint16_t num_flex_set = 0;
uint16_t num_flex_mask = 0;
+ uint16_t i;
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
fdir->mode = RTE_FDIR_MODE_PERFECT;
@@ -2032,6 +2033,8 @@ i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
(uint32_t)hw->func_caps.fd_filters_best_effort;
fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
+ for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+ fdir->flow_types_mask[i] = 0ULL;
fdir->flex_payload_unit = sizeof(uint16_t);
fdir->flex_bitmask_unit = sizeof(uint16_t);
fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 7e4936e3..16c47cf7 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <sys/queue.h>
@@ -39,7 +11,7 @@
#include <stdarg.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
@@ -138,6 +110,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -2015,8 +1989,8 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
item_type = item->type;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Get the MAC info. */
if (!eth_spec || !eth_mask) {
rte_flow_error_set(error, EINVAL,
@@ -2101,7 +2075,7 @@ i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
}
if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- act_q = (const struct rte_flow_action_queue *)act->conf;
+ act_q = act->conf;
filter->queue = act_q->index;
if (filter->queue >= pf->dev_data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
@@ -2276,11 +2250,19 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
uint8_t raw_id)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint32_t flx_pit;
+ uint32_t flx_pit, flx_ort;
uint8_t field_idx;
uint16_t min_next_off = 0; /* in words */
uint8_t i;
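+	/* Program the global flexible payload override table (GLQF_ORT) so
+	 * the hardware extracts the raw payload words for this layer. This
+	 * touches a device-global register, hence the shared-config warning.
+	 */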
+ if (raw_id) {
+ flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
+ (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
+ (layer_idx * I40E_MAX_FLXPLD_FIED);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+ i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
+ }
+
/* Set flex pit */
for (i = 0; i < raw_id; i++) {
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
@@ -2496,8 +2478,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
item_type = item->type;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
if (eth_spec && eth_mask) {
if (!is_zero_ether_addr(&eth_mask->src) ||
@@ -2534,10 +2516,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
if (vlan_spec && vlan_mask) {
if (vlan_mask->tci ==
rte_cpu_to_be_16(I40E_TCI_MASK)) {
@@ -2553,10 +2533,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
l3 = RTE_FLOW_ITEM_TYPE_IPV4;
- ipv4_spec =
- (const struct rte_flow_item_ipv4 *)item->spec;
- ipv4_mask =
- (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2621,10 +2599,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
l3 = RTE_FLOW_ITEM_TYPE_IPV6;
- ipv6_spec =
- (const struct rte_flow_item_ipv6 *)item->spec;
- ipv6_mask =
- (const struct rte_flow_item_ipv6 *)item->mask;
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2692,8 +2668,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
outer_ip = false;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
pctype =
@@ -2740,8 +2716,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_UDP:
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_spec = item->spec;
+ udp_mask = item->mask;
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
pctype =
@@ -2793,8 +2769,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
return -rte_errno;
}
- gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
- gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+ gtp_spec = item->spec;
+ gtp_mask = item->mask;
if (gtp_spec && gtp_mask) {
if (gtp_mask->v_pt_rsv_flags ||
@@ -2815,10 +2791,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_SCTP:
- sctp_spec =
- (const struct rte_flow_item_sctp *)item->spec;
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
pctype =
@@ -2866,8 +2840,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_RAW:
- raw_spec = (const struct rte_flow_item_raw *)item->spec;
- raw_mask = (const struct rte_flow_item_raw *)item->mask;
+ raw_spec = item->spec;
+ raw_mask = item->mask;
if (!raw_spec || !raw_mask) {
rte_flow_error_set(error, EINVAL,
@@ -2877,6 +2851,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
return -rte_errno;
}
+ if (pf->support_multi_driver) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported flexible payload.");
+ return -rte_errno;
+ }
+
ret = i40e_flow_check_raw_item(item, raw_spec, error);
if (ret < 0)
return ret;
@@ -2935,7 +2917,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
raw_id++;
break;
case RTE_FLOW_ITEM_TYPE_VF:
- vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ vf_spec = item->spec;
filter->input.flow_ext.is_vf = 1;
filter->input.flow_ext.dst_id = vf_spec->id;
if (filter->input.flow_ext.is_vf &&
@@ -3027,7 +3009,7 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
NEXT_ITEM_OF_ACTION(act, actions, index);
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
- act_q = (const struct rte_flow_action_queue *)act->conf;
+ act_q = act->conf;
filter->action.rx_queue = act_q->index;
if ((!filter->input.flow_ext.is_vf &&
filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
@@ -3058,7 +3040,7 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
NEXT_ITEM_OF_ACTION(act, actions, index);
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_MARK:
- mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ mark_spec = act->conf;
filter->action.report_status = I40E_FDIR_REPORT_ID;
filter->soft_id = mark_spec->id;
break;
@@ -3149,7 +3131,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
}
if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
- act_vf = (const struct rte_flow_action_vf *)act->conf;
+ act_vf = act->conf;
filter->vf_id = act_vf->id;
filter->is_to_vf = 1;
if (filter->vf_id >= pf->vf_num) {
@@ -3164,7 +3146,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
index++;
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- act_q = (const struct rte_flow_action_queue *)act->conf;
+ act_q = act->conf;
filter->queue_id = act_q->index;
if ((!filter->is_to_vf) &&
(filter->queue_id >= pf->dev_data->nb_rx_queues)) {
@@ -3256,8 +3238,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
item_type = item->type;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Check if ETH item is used for place holder.
* If yes, both spec and mask should be NULL.
@@ -3300,10 +3282,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
if (!(vlan_spec && vlan_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3360,10 +3340,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- vxlan_spec =
- (const struct rte_flow_item_vxlan *)item->spec;
- vxlan_mask =
- (const struct rte_flow_item_vxlan *)item->mask;
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
/* Check if VXLAN item is used to describe protocol.
* If yes, both spec and mask should be NULL.
* If no, both spec and mask shouldn't be NULL.
@@ -3489,8 +3467,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
item_type = item->type;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Check if ETH item is used for place holder.
* If yes, both spec and mask should be NULL.
@@ -3534,10 +3512,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
if (!(vlan_spec && vlan_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3582,10 +3558,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
- nvgre_spec =
- (const struct rte_flow_item_nvgre *)item->spec;
- nvgre_mask =
- (const struct rte_flow_item_nvgre *)item->mask;
+ nvgre_spec = item->spec;
+ nvgre_mask = item->mask;
/* Check if NVGRE item is used to describe protocol.
* If yes, both spec and mask should be NULL.
* If no, both spec and mask shouldn't be NULL.
@@ -3610,6 +3584,41 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
"Invalid TNI mask");
return -rte_errno;
}
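+				/* The checks below pin the NVGRE header down:
+				 * 0x2000 is the K (key present) bit in
+				 * c_k_s_rsvd0_ver, and 0x6558 is the
+				 * Transparent Ethernet Bridging protocol type
+				 * carried by NVGRE.
+				 */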
+ if (nvgre_mask->protocol &&
+ nvgre_mask->protocol != 0xFFFF) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+ if (nvgre_mask->c_k_s_rsvd0_ver &&
+ nvgre_mask->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0xFFFF)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+ if (nvgre_spec->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0x2000) &&
+ nvgre_mask->c_k_s_rsvd0_ver) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+ if (nvgre_mask->protocol &&
+ nvgre_spec->protocol !=
+ rte_cpu_to_be_16(0x6558)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
rte_memcpy(((uint8_t *)&tenant_id_be + 1),
nvgre_spec->tni, 3);
filter->tenant_id =
@@ -3761,10 +3770,8 @@ i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
- mpls_spec =
- (const struct rte_flow_item_mpls *)item->spec;
- mpls_mask =
- (const struct rte_flow_item_mpls *)item->mask;
+ mpls_spec = item->spec;
+ mpls_mask = item->mask;
if (!mpls_spec || !mpls_mask) {
rte_flow_error_set(error, EINVAL,
@@ -3900,10 +3907,8 @@ i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_GTPC:
case RTE_FLOW_ITEM_TYPE_GTPU:
- gtp_spec =
- (const struct rte_flow_item_gtp *)item->spec;
- gtp_mask =
- (const struct rte_flow_item_gtp *)item->mask;
+ gtp_spec = item->spec;
+ gtp_mask = item->mask;
if (!gtp_spec || !gtp_mask) {
rte_flow_error_set(error, EINVAL,
@@ -4014,10 +4019,8 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
if (!(vlan_spec && vlan_mask)) {
rte_flow_error_set(error, EINVAL,
@@ -4094,6 +4097,317 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
return ret;
}
+/**
+ * This function configures the existing i40e RSS setup with rte_flow.
+ * It also enables queue region configuration through the flow API for i40e.
+ * The pattern indicates which parameters are carried by the flow,
+ * such as user_priority or flowtype for a queue region, or the HASH
+ * function for RSS.
+ * The action carries parameters such as the queue index and HASH
+ * function for RSS, or the flowtype for queue region configuration.
+ * For example:
+ * pattern:
+ * Case 1: only ETH, the flowtype for the queue region will be parsed.
+ * Case 2: only VLAN, the user_priority for the queue region will be parsed.
+ * Case 3: none, the RSS parameters will be parsed from the action.
+ * Any pattern item other than ETH or VLAN is treated as invalid, except END.
+ * So the pattern choice depends on the purpose of configuring that flow.
+ * action:
+ * The RSS action carries the valid parameters in
+ * struct rte_flow_action_rss for all three cases.
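+ *
+ * Illustrative testpmd usage (a sketch only; the exact command syntax
+ * may differ between testpmd versions):
+ *   flow create 0 ingress pattern end
+ *        actions rss queues 0 1 2 3 end / end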
+ */
+static int
+i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ uint8_t *action_flag,
+ struct i40e_queue_regions *info)
+{
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ return 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ *action_flag = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) {
+ info->region[0].user_priority[0] =
+ (vlan_spec->tci >> 13) & 0x7;
+ info->region[0].user_priority_num = 1;
+ info->queue_region_number = 1;
+ *action_flag = 0;
+ }
+ }
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ uint8_t action_flag,
+ struct i40e_queue_regions *conf_info,
+ union i40e_filter_t *filter)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_rte_flow_rss_conf *rss_config =
+ &filter->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ uint16_t i, j, n, tmp;
+ uint32_t index = 0;
+ uint64_t hf_bit = 1;
+
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ rss = act->conf;
+
+ /**
+	 * RSS only supports forwarding;
+	 * check that the first non-void action is RSS.
+ */
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (action_flag) {
+ for (n = 0; n < 64; n++) {
+ if (rss->rss_conf->rss_hf & (hf_bit << n)) {
+ conf_info->region[0].hw_flowtype[0] = n;
+ conf_info->region[0].flowtype_num = 1;
+ conf_info->queue_region_number = 1;
+ break;
+ }
+ }
+ }
+
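+	/* Merge each parsed region into the PF's persistent queue region
+	 * state, after validating the region size (a power of two, at most
+	 * 64 queues), the user priority (< 8) and the PCTYPE (< 64).
+	 */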
+ for (n = 0; n < conf_info->queue_region_number; n++) {
+ if (conf_info->region[n].user_priority_num ||
+ conf_info->region[n].flowtype_num) {
+ if (!((rte_is_power_of_2(rss->num)) &&
+ rss->num <= 64)) {
+ PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].user_priority[n] >=
+ I40E_MAX_USER_PRIORITY) {
+ PMD_DRV_LOG(ERR, "the user priority max index is 7");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].hw_flowtype[n] >=
+ I40E_FILTER_PCTYPE_MAX) {
+ PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
+ return -rte_errno;
+ }
+
+ if (rss_info->num < rss->num ||
+ rss_info->queue[0] < rss->queue[0] ||
+ (rss->queue[0] + rss->num >
+ rss_info->num + rss_info->queue[0])) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ if (info->region[i].queue_num == rss->num &&
+ info->region[i].queue_start_index ==
+ rss->queue[0])
+ break;
+ }
+
+ if (i == info->queue_region_number) {
+ if (i > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return -rte_errno;
+ }
+
+ info->region[i].queue_num =
+ rss->num;
+ info->region[i].queue_start_index =
+ rss->queue[0];
+ info->region[i].region_id =
+ info->queue_region_number;
+
+ j = info->region[i].user_priority_num;
+ tmp = conf_info->region[n].user_priority[0];
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] = tmp;
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ tmp = conf_info->region[n].hw_flowtype[0];
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] = tmp;
+ info->region[i].flowtype_num++;
+ }
+ info->queue_region_number++;
+ } else {
+ j = info->region[i].user_priority_num;
+ tmp = conf_info->region[n].user_priority[0];
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] = tmp;
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ tmp = conf_info->region[n].hw_flowtype[0];
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] = tmp;
+ info->region[i].flowtype_num++;
+ }
+ }
+ }
+
+ rss_config->queue_region_conf = TRUE;
+ }
+
+ if (rss_config->queue_region_conf)
+ return 0;
+
+ if (!rss || !rss->num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+ if (rss->rss_conf)
+ rss_config->rss_conf = *rss->rss_conf;
+ else
+ rss_config->rss_conf.rss_hf =
+ pf->adapter->flow_types_mask;
+
+ for (n = 0; n < rss->num; ++n)
+ rss_config->queue[n] = rss->queue[n];
+ rss_config->num = rss->num;
+ index++;
+
+	/* check that the next non-void action is END */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+ rss_config->queue_region_conf = FALSE;
+
+ return 0;
+}
+
+static int
+i40e_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ union i40e_filter_t *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct i40e_queue_regions info;
+ uint8_t action_flag = 0;
+
+ memset(&info, 0, sizeof(struct i40e_queue_regions));
+
+ ret = i40e_flow_parse_rss_pattern(dev, pattern,
+ error, &action_flag, &info);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_rss_action(dev, actions, error,
+ action_flag, &info, filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_HASH;
+
+ return 0;
+}
+
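+/* Apply a validated RSS filter: flush the queue region configuration
+ * to hardware when one was parsed, otherwise program plain RSS.
+ */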
+static int
+i40e_config_rss_filter_set(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (conf->queue_region_conf) {
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ conf->queue_region_conf = 0;
+ } else {
+ i40e_config_rss_filter(pf, conf, 1);
+ }
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_del(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ i40e_config_rss_filter(pf, conf, 0);
+ return 0;
+}
+
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -4130,6 +4444,17 @@ i40e_flow_validate(struct rte_eth_dev *dev,
memset(&cons_filter, 0, sizeof(cons_filter));
+	/* Get the first non-void action */
+ while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ i++;
+
+ if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ ret = i40e_parse_rss_filter(dev, attr, pattern,
+ actions, &cons_filter, error);
+ return ret;
+ }
+
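+	/* Not an RSS flow: reset the index, which was advanced while
+	 * scanning the actions, before walking the pattern.
+	 */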
+ i = 0;
/* Get the non-void item number of pattern */
while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
@@ -4217,6 +4542,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
i40e_tunnel_filter_list);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_set(dev,
+ &cons_filter.rss_conf);
+ flow->rule = &pf->rss_info;
+ break;
default:
goto free_flow;
}
@@ -4255,6 +4585,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_flow_add_del_fdir_filter(dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_del(dev,
+ (struct i40e_rte_flow_rss_conf *)flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -4397,6 +4731,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
+ ret = i40e_flow_flush_rss_filter(dev);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush rss flows.");
+ return -rte_errno;
+ }
+
return ret;
}
@@ -4406,6 +4748,7 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
struct rte_eth_dev *dev = pf->adapter->eth_dev;
struct i40e_fdir_info *fdir_info = &pf->fdir;
struct i40e_fdir_filter *fdir_filter;
+ enum i40e_filter_pctype pctype;
struct rte_flow *flow;
void *temp;
int ret;
@@ -4427,6 +4770,10 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
rte_free(flow);
}
}
+
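+		/* Reset the input set flags so later rules may reprogram
+		 * the input set for each packet classification type (PCTYPE).
+		 */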
+ for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
+ pf->fdir.inset_flag[pctype] = 0;
}
return ret;
@@ -4487,3 +4834,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
return ret;
}
+
+/* remove the rss filter */
+static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int32_t ret = -EINVAL;
+
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ if (rss_info->num)
+ ret = i40e_config_rss_filter(pf, rss_info, FALSE);
+ return ret;
+}
diff --git a/drivers/net/i40e/i40e_logs.h b/drivers/net/i40e/i40e_logs.h
index 8e99cd52..b1049699 100644
--- a/drivers/net/i40e/i40e_logs.h
+++ b/drivers/net/i40e/i40e_logs.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef _I40E_LOGS_H_
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 94bb0cfd..dd3962d3 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <sys/queue.h>
@@ -43,7 +14,7 @@
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -273,19 +244,23 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
}
static void
-i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
+i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, uint8_t *msg,
+ bool b_op)
{
struct virtchnl_version_info info;
- /* Respond like a Linux PF host in order to support both DPDK VF and
- * Linux VF driver. The expense is original DPDK host specific feature
+	/* VF and PF drivers need to follow the virtchnl definition, no matter
+	 * whether the host is the DPDK driver or another kernel driver.
+	 * The original DPDK host specific feature
	 * like CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not be available.
- *
- * DPDK VF also can't identify host driver by version number returned.
- * It always assume talking with Linux PF.
*/
+
info.major = VIRTCHNL_VERSION_MAJOR;
- info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ vf->version = *(struct virtchnl_version_info *)msg;
+ if (VF_IS_V10(&vf->version))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ else
+ info.minor = VIRTCHNL_VERSION_MINOR;
if (b_op)
i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
@@ -309,11 +284,13 @@ i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
}
static int
-i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
+i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, uint8_t *msg,
+ bool b_op)
{
struct virtchnl_vf_resource *vf_res = NULL;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
uint32_t len = 0;
+ uint64_t default_hena = I40E_RSS_HENA_ALL;
int ret = I40E_SUCCESS;
if (!b_op) {
@@ -337,11 +314,35 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
goto send_msg;
}
- vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 |
- VIRTCHNL_VF_OFFLOAD_VLAN;
+ if (VF_IS_V10(&vf->version)) /* doesn't support offload negotiate */
+ vf->request_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
+ else
+ vf->request_caps = *(uint32_t *)msg;
+
+	/* enable all RSS by default;
+	 * setting hena through virtchnl is not supported yet.
+ */
+ if (vf->request_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ I40E_WRITE_REG(hw, I40E_VFQF_HENA1(0, vf->vf_idx),
+ (uint32_t)default_hena);
+ I40E_WRITE_REG(hw, I40E_VFQF_HENA1(1, vf->vf_idx),
+ (uint32_t)(default_hena >> 32));
+ I40E_WRITE_FLUSH(hw);
+ }
+
+ vf_res->vf_cap_flags = vf->request_caps &
+ I40E_VIRTCHNL_OFFLOAD_CAPS;
+ /* For X722, it supports write back on ITR
+ * without binding queue to interrupt vector.
+ */
+ if (hw->mac.type == I40E_MAC_X722)
+ vf_res->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
vf_res->num_queue_pairs = vf->vsi->nb_qps;
vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;
+ vf_res->rss_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4;
+ vf_res->rss_lut_size = (I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4;
/* Change below setting if PF host can support more VSIs for VF */
vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
@@ -1090,6 +1091,84 @@ i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op)
return ret;
}
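+/* Handle VIRTCHNL_OP_CONFIG_RSS_LUT: the message carries a
+ * variable-length lookup table, so msglen is validated against the
+ * declared lut_entries before the VSI LUT is programmed.
+ */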
+static int
+i40e_pf_host_process_cmd_set_rss_lut(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
+{
+ struct virtchnl_rss_lut *rss_lut = (struct virtchnl_rss_lut *)msg;
+ uint16_t valid_len;
+ int ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ VIRTCHNL_OP_CONFIG_RSS_LUT,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ if (!msg || msglen <= sizeof(struct virtchnl_rss_lut)) {
+ PMD_DRV_LOG(ERR, "set_rss_lut argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ valid_len = sizeof(struct virtchnl_rss_lut) + rss_lut->lut_entries - 1;
+ if (msglen < valid_len) {
+ PMD_DRV_LOG(ERR, "set_rss_lut length mismatch");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ ret = i40e_set_rss_lut(vf->vsi, rss_lut->lut, rss_lut->lut_entries);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
+ ret, NULL, 0);
+
+ return ret;
+}
+
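+/* Handle VIRTCHNL_OP_CONFIG_RSS_KEY: like the LUT handler, the key
+ * is variable length, so the message size is checked against key_len
+ * before the VSI hash key is updated.
+ */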
+static int
+i40e_pf_host_process_cmd_set_rss_key(struct i40e_pf_vf *vf,
+ uint8_t *msg,
+ uint16_t msglen,
+ bool b_op)
+{
+ struct virtchnl_rss_key *rss_key = (struct virtchnl_rss_key *)msg;
+ uint16_t valid_len;
+ int ret = I40E_SUCCESS;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+			VIRTCHNL_OP_CONFIG_RSS_KEY,
+			I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
+ if (!msg || msglen <= sizeof(struct virtchnl_rss_key)) {
+ PMD_DRV_LOG(ERR, "set_rss_key argument too short");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ valid_len = sizeof(struct virtchnl_rss_key) + rss_key->key_len - 1;
+ if (msglen < valid_len) {
+ PMD_DRV_LOG(ERR, "set_rss_key length mismatch");
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ ret = i40e_set_rss_key(vf->vsi, rss_key->key, rss_key->key_len);
+
+send_msg:
+ i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
+ ret, NULL, 0);
+
+ return ret;
+}
+
void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
@@ -1185,8 +1264,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
* do nothing and send not_supported to VF. As PF must send a response
* to VF and ACK/NACK is not defined.
*/
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
- NULL, &ret_param);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);
if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
opcode);
@@ -1196,7 +1274,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
switch (opcode) {
case VIRTCHNL_OP_VERSION:
PMD_DRV_LOG(INFO, "OP_VERSION received");
- i40e_pf_host_process_cmd_version(vf, b_op);
+ i40e_pf_host_process_cmd_version(vf, msg, b_op);
break;
case VIRTCHNL_OP_RESET_VF:
PMD_DRV_LOG(INFO, "OP_RESET_VF received");
@@ -1204,7 +1282,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
- i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
+ i40e_pf_host_process_cmd_get_vf_resource(vf, msg, b_op);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
@@ -1265,6 +1343,14 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received");
i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op);
break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_LUT received");
+ i40e_pf_host_process_cmd_set_rss_lut(vf, msg, msglen, b_op);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_KEY received");
+ i40e_pf_host_process_cmd_set_rss_key(vf, msg, msglen, b_op);
+ break;
/* Don't add command supported below, which will
* return an error code.
*/
diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h
index 04116637..1809ba4d 100644
--- a/drivers/net/i40e/i40e_pf.h
+++ b/drivers/net/i40e/i40e_pf.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#ifndef _I40E_PF_H_
@@ -37,6 +8,12 @@
/* Default setting on number of VSIs that VF can contain */
#define I40E_DEFAULT_VF_VSI_NUM 1
+#define I40E_VIRTCHNL_OFFLOAD_CAPS ( \
+ VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF | \
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+
struct virtchnl_vlan_offload_info {
uint16_t vsi_id;
uint8_t enable_vlan_strip;
diff --git a/drivers/net/i40e/i40e_regs.h b/drivers/net/i40e/i40e_regs.h
index 472c7a06..b19bb1d5 100644
--- a/drivers/net/i40e/i40e_regs.h
+++ b/drivers/net/i40e/i40e_regs.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
*/
struct i40e_reg_info {
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index ad06b71e..1217e5a6 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <stdio.h>
@@ -46,7 +17,7 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_udp.h>
@@ -99,10 +70,6 @@
#define I40E_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
-static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
- struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
-
static inline void
i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
{
@@ -1718,7 +1685,9 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
#endif
dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
- dev->rx_pkt_burst == i40e_recv_pkts_vec)
+ dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2)
return ptypes;
return NULL;
}
@@ -2316,7 +2285,8 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
* vPMD tx will not set sw_ring's mbuf to NULL after free,
* so need to free remains more carefully.
*/
- if (dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
+ if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx2 ||
+ dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
if (txq->tx_tail < i) {
for (; i < txq->nb_tx_desc; i++) {
@@ -2749,6 +2719,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
rxq->vsi = pf->fdir.fdir_vsi;
rxq->rx_ring_phys_addr = rz->iova;
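+	/* Zero the ring so stale descriptor data is not misread as valid. */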
+ memset(rz->addr, 0, I40E_FDIR_NUM_RX_DESC * sizeof(union i40e_rx_desc));
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
/*
@@ -2839,6 +2810,17 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
dev->data->port_id);
dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+#ifdef RTE_ARCH_X86
+ /*
+			 * since the AVX frequency can differ from the base
+			 * frequency, limit use of the AVX2 version to later
+			 * platforms, not all those that could theoretically
+			 * run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ dev->rx_pkt_burst =
+ i40e_recv_scattered_pkts_vec_avx2;
+#endif
} else {
PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
"allocation callback (port=%d).",
@@ -2858,6 +2840,16 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
dev->data->port_id);
dev->rx_pkt_burst = i40e_recv_pkts_vec;
+#ifdef RTE_ARCH_X86
+ /*
+			 * since the AVX frequency can differ from the base
+			 * frequency, limit use of the AVX2 version to later
+			 * platforms, not all those that could theoretically
+			 * run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ dev->rx_pkt_burst = i40e_recv_pkts_vec_avx2;
+#endif
} else if (ad->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function "
@@ -2878,7 +2870,9 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
rx_using_sse =
(dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
- dev->rx_pkt_burst == i40e_recv_pkts_vec);
+ dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
@@ -2935,6 +2929,16 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
if (ad->tx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+#ifdef RTE_ARCH_X86
+ /*
+			 * since the AVX frequency can differ from the base
+			 * frequency, limit use of the AVX2 version to later
+			 * platforms, not all those that could theoretically
+			 * run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx2;
+#endif
} else {
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_simple;
@@ -3041,6 +3045,22 @@ i40e_recv_scattered_pkts_vec(
return 0;
}
+uint16_t __attribute__((weak))
+i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
int __attribute__((weak))
i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
{
@@ -3066,3 +3086,11 @@ i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
{
return 0;
}
+
+uint16_t __attribute__((weak))
+i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 06c6a659..34cd7923 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef _I40E_RXTX_H_
@@ -256,6 +227,12 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
void i40e_set_tx_function(struct rte_eth_dev *dev);
void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
void i40e_set_default_pctype_table(struct rte_eth_dev *dev);
+uint16_t i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_vec_avx2(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
/* For each value it means, datasheet of hardware can tell more details
*
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
index 5e4e472a..f3fc8267 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -33,7 +33,7 @@
*/
#include <stdint.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include "base/i40e_prototype.h"
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
new file mode 100644
index 00000000..dbcb61f3
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -0,0 +1,792 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <x86intrin.h>
+
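+/* The vector loads and stores below cast qualifiers off the volatile
+ * descriptor ring, which GCC would otherwise warn about.
+ */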
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ __m128i dma_addr0;
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
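+	/* With the default 32B descriptors, each 16B store below rearms one
+	 * descriptor's address fields; with 16B descriptors, a single 32B
+	 * store rearms two descriptors at once.
+	 */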
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+ struct rte_mbuf *mb0, *mb1;
+ __m128i dma_addr0, dma_addr1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ __m128i vaddr0, vaddr1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+#else
+ struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
+ __m256i dma_addr0_1, dma_addr2_3;
+ __m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
+ /* Initialize the mbufs in vector, process 4 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH;
+ i += 4, rxep += 4, rxdp += 4) {
+ __m128i vaddr0, vaddr1, vaddr2, vaddr3;
+ __m256i vaddr0_1, vaddr2_3;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+ mb2 = rxep[2].mbuf;
+ mb3 = rxep[3].mbuf;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+ vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
+ vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
+
+ /*
+ * merge 0 & 1, by casting 0 to 256-bit and inserting 1
+ * into the high lanes. Similarly for 2 & 3
+ */
+ vaddr0_1 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(vaddr0), vaddr1, 1);
+ vaddr2_3 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(vaddr2), vaddr3, 1);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
+ dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);
+
+ /* add headroom to pa values */
+ dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
+ dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
+ _mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
+ }
+
+#endif
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+#define PKTLEN_SHIFT 10
+
+static inline uint16_t
+_recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+#define RTE_I40E_DESCS_PER_LOOP_AVX 8
+
+ const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
+ 0, rxq->mbuf_initializer);
+ struct i40e_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
+ volatile union i40e_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
+ const int avx_aligned = ((rxq->rx_tail & 1) == 0);
+ rte_prefetch0(rxdp);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP_AVX */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP_AVX);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ while (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* constants used in processing loop */
+ const __m256i crc_adjust = _mm256_set_epi16(
+ /* first descriptor */
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0, /* ignore pkt_type field */
+ /* second descriptor */
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+
+ /* 8 packets DD mask, LSB in each 32-bit value */
+ const __m256i dd_check = _mm256_set1_epi32(1);
+
+ /* 8 packets EOP mask, second-LSB in each 32-bit value */
+ const __m256i eop_check = _mm256_slli_epi32(dd_check,
+ I40E_RX_DESC_STATUS_EOF_SHIFT);
+
+ /* mask to shuffle from desc. to mbuf (2 descriptors)*/
+ const __m256i shuf_msk = _mm256_set_epi8(
+ /* first descriptor */
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+			0xFF, 0xFF,     /* pkt_type set as unknown */
+ /* second descriptor */
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+			0xFF, 0xFF      /* pkt_type set as unknown */
+ );
+ /*
+ * compile-time check the above crc and shuffle layout is correct.
+ * NOTE: the first field (lowest address) is given last in set_epi
+ * calls above.
+ */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+ offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+ /* Status/Error flag masks */
+ /*
+ * mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication. Bits 3-5 of error
+ * field (bits 22-24) are for IP/L4 checksum errors
+ */
+ const __m256i flags_mask = _mm256_set1_epi32(
+ (1 << 2) | (1 << 11) | (3 << 12) | (7 << 22));
+ /*
+ * data to be shuffled by result of flag mask. If VLAN bit is set,
+ * (bit 2), then position 4 in this array will be used in the
+ * destination
+ */
+ const __m256i vlan_flags_shuf = _mm256_set_epi32(
+ 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
+ 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
+ /*
+ * data to be shuffled by result of flag mask, shifted down 11.
+ * If RSS/FDIR bits are set, shuffle moves appropriate flags in
+ * place.
+ */
+ const __m256i rss_flags_shuf = _mm256_set_epi8(
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
+		0, 0, PKT_RX_FDIR, 0, /* end of upper 128-bits */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
+ 0, 0, PKT_RX_FDIR, 0);
+
+ /*
+ * data to be shuffled by the result of the flags mask shifted by 22
+	 * bits. This gives us the l3_l4 flags.
+ */
+ const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+		/* shift right 1 bit to make sure it does not exceed 255 */
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
+ /* second 128-bits */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+
+ const __m256i cksum_mask = _mm256_set1_epi32(
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD);
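+	/*
+	 * Note on the ">> 1" in the table above: the combined checksum
+	 * flags can exceed a byte, but shuffle-table entries are single
+	 * bytes, so the flags are stored pre-shifted right by one bit and
+	 * the loop below shifts the shuffle result left by one to restore
+	 * them before applying cksum_mask.
+	 */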
+
+ RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */
+
+ uint16_t i, received;
+ for (i = 0, received = 0; i < nb_pkts;
+ i += RTE_I40E_DESCS_PER_LOOP_AVX,
+ rxdp += RTE_I40E_DESCS_PER_LOOP_AVX) {
+ /* step 1, copy over 8 mbuf pointers to rx_pkts array */
+ _mm256_storeu_si256((void *)&rx_pkts[i],
+ _mm256_loadu_si256((void *)&sw_ring[i]));
+#ifdef RTE_ARCH_X86_64
+ _mm256_storeu_si256((void *)&rx_pkts[i + 4],
+ _mm256_loadu_si256((void *)&sw_ring[i + 4]));
+#endif
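+		/*
+		 * on 32-bit builds mbuf pointers are 4 bytes wide, so all
+		 * eight fit in the single 256-bit copy above; the second
+		 * copy is only needed for 8-byte pointers
+		 */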
+
+ __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
+#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+		/* for AVX we need alignment, otherwise loads are not atomic */
+ if (avx_aligned) {
+ /* load in descriptors, 2 at a time, in reverse order */
+ raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6));
+ rte_compiler_barrier();
+ raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4));
+ rte_compiler_barrier();
+ raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2));
+ rte_compiler_barrier();
+ raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0));
+ } else
+#endif
+ do {
+ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7));
+ rte_compiler_barrier();
+ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6));
+ rte_compiler_barrier();
+ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5));
+ rte_compiler_barrier();
+ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4));
+ rte_compiler_barrier();
+ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3));
+ rte_compiler_barrier();
+ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2));
+ rte_compiler_barrier();
+ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1));
+ rte_compiler_barrier();
+ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0));
+
+ raw_desc6_7 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(raw_desc6), raw_desc7, 1);
+ raw_desc4_5 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(raw_desc4), raw_desc5, 1);
+ raw_desc2_3 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(raw_desc2), raw_desc3, 1);
+ raw_desc0_1 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(raw_desc0), raw_desc1, 1);
+ } while (0);
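+		/*
+		 * each 128-bit load above grabs one descriptor's
+		 * status/length writeback words atomically; loading in
+		 * reverse order with compiler barriers in between ensures
+		 * that when a DD bit is seen set, the earlier descriptors'
+		 * data is valid too
+		 */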
+
+ if (split_packet) {
+ int j;
+ for (j = 0; j < RTE_I40E_DESCS_PER_LOOP_AVX; j++)
+ rte_mbuf_prefetch_part2(rx_pkts[i + j]);
+ }
+
+ /*
+ * convert descriptors 4-7 into mbufs, adjusting length and
+ * re-arranging fields. Then write into the mbuf
+ */
+ const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7, PKTLEN_SHIFT);
+ const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5, PKTLEN_SHIFT);
+ const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7, len6_7, 0x80);
+ const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5, len4_5, 0x80);
+ __m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk);
+ __m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk);
+ mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust);
+ mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust);
+ /*
+		 * to get packet types, shift 64-bit values down 30 bits
+		 * so that the ptype is in the lower 8 bits of each
+ */
+ const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30);
+ const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30);
+ const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24);
+ const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8);
+ const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24);
+ const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8);
+ mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4);
+ mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0);
+ mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4);
+ mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0);
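+		/*
+		 * ptype_tbl maps the 8-bit hardware packet type extracted
+		 * above to the corresponding RTE_PTYPE_* value, written
+		 * into the packet_type slot of each mbuf lane
+		 */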
+ /* merge the status bits into one register */
+ const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7,
+ desc4_5);
+
+ /*
+ * convert descriptors 0-3 into mbufs, adjusting length and
+ * re-arranging fields. Then write into the mbuf
+ */
+ const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3, PKTLEN_SHIFT);
+ const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1, PKTLEN_SHIFT);
+ const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3, len2_3, 0x80);
+ const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1, len0_1, 0x80);
+ __m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk);
+ __m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk);
+ mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust);
+ mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust);
+ /* get the packet types */
+ const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30);
+ const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30);
+ const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24);
+ const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8);
+ const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24);
+ const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8);
+ mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4);
+ mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0);
+ mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4);
+ mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0);
+ /* merge the status bits into one register */
+ const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3,
+ desc0_1);
+
+ /*
+		 * take the two sets of status bits and merge into one.
+		 * After the merge, the packet status flags are in the
+ * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
+ */
+ __m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
+ status0_3);
+
+ /* now do flag manipulation */
+
+ /* get only flag/error bits we want */
+ const __m256i flag_bits = _mm256_and_si256(
+ status0_7, flags_mask);
+ /* set vlan and rss flags */
+ const __m256i vlan_flags = _mm256_shuffle_epi8(
+ vlan_flags_shuf, flag_bits);
+ const __m256i rss_flags = _mm256_shuffle_epi8(
+ rss_flags_shuf, _mm256_srli_epi32(flag_bits, 11));
+ /*
+		 * l3_l4 error flags: shuffle, then shift left to undo the
+		 * 1-bit adjustment baked into l3_l4_flags_shuf, and finally
+		 * mask out extra bits
+ */
+ __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
+ _mm256_srli_epi32(flag_bits, 22));
+ l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+ l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+
+ /* merge flags */
+ const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+ _mm256_or_si256(rss_flags, vlan_flags));
+ /*
+ * At this point, we have the 8 sets of flags in the low 16-bits
+		 * of each 32-bit value in mbuf_flags.
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write. However, we can also
+ * add in the previously computed rx_descriptor fields to
+ * make a single 256-bit write per mbuf
+ */
+ /* check the structure matches expectations */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, rearm_data) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+ RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ /* build up data and do writes */
+ __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
+ rearm6, rearm7;
+ rearm6 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 8), 0x04);
+ rearm4 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 4), 0x04);
+ rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
+ rearm0 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(mbuf_flags, 4), 0x04);
+		/* permute to add in the rx_descriptor fields, e.g. rss */
+ rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
+ rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
+ rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
+ rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
+ /* write to mbuf */
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, rearm6);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, rearm4);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, rearm2);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, rearm0);
+
+ /* repeat for the odd mbufs */
+ const __m256i odd_flags = _mm256_castsi128_si256(
+ _mm256_extracti128_si256(mbuf_flags, 1));
+ rearm7 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 8), 0x04);
+ rearm5 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 4), 0x04);
+ rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
+ rearm1 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(odd_flags, 4), 0x04);
+		/* since odd mbufs are already in the hi 128-bits, use blend */
+ rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
+ rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
+ rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
+ rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
+ /* again write to mbufs */
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, rearm7);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, rearm5);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, rearm3);
+ _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, rearm1);
+
+ /* extract and record EOP bit */
+ if (split_packet) {
+ const __m128i eop_mask = _mm_set1_epi16(
+ 1 << I40E_RX_DESC_STATUS_EOF_SHIFT);
+ const __m256i eop_bits256 = _mm256_and_si256(status0_7,
+ eop_check);
+ /* pack status bits into a single 128-bit register */
+ const __m128i eop_bits = _mm_packus_epi32(
+ _mm256_castsi256_si128(eop_bits256),
+ _mm256_extractf128_si256(eop_bits256, 1));
+ /*
+ * flip bits, and mask out the EOP bit, which is now
+			 * a split-packet bit, i.e. !EOP, rather than an EOP bit.
+ */
+ __m128i split_bits = _mm_andnot_si128(eop_bits,
+ eop_mask);
+ /*
+ * eop bits are out of order, so we need to shuffle them
+ * back into order again. In doing so, only use low 8
+ * bits, which acts like another pack instruction
+ * The original order is (hi->lo): 1,3,5,7,0,2,4,6
+ * [Since we use epi8, the 16-bit positions are
+ * multiplied by 2 in the eop_shuffle value.]
+ */
+ __m128i eop_shuffle = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF, /* zero hi 64b */
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 8, 0, 10, 2, /* move values to lo 64b */
+ 12, 4, 14, 6);
+ split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
+ *(uint64_t *)split_packet = _mm_cvtsi128_si64(split_bits);
+ split_packet += RTE_I40E_DESCS_PER_LOOP_AVX;
+ }
+
+ /* perform dd_check */
+ status0_7 = _mm256_and_si256(status0_7, dd_check);
+ status0_7 = _mm256_packs_epi32(status0_7,
+ _mm256_setzero_si256());
+
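+		/*
+		 * after masking with dd_check and packing, each completed
+		 * descriptor contributes exactly one set bit, so the two
+		 * popcounts below count the packets received this
+		 * iteration; a short count means the ring ran dry
+		 */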
+ uint64_t burst = __builtin_popcountll(_mm_cvtsi128_si64(
+ _mm256_extracti128_si256(status0_7, 1)));
+ burst += __builtin_popcountll(_mm_cvtsi128_si64(
+ _mm256_castsi256_si128(status0_7)));
+ received += burst;
+ if (burst != RTE_I40E_DESCS_PER_LOOP_AVX)
+ break;
+ }
+
+ /* update tail pointers */
+ rxq->rx_tail += received;
+ rxq->rx_tail &= (rxq->nb_rx_desc - 1);
+ if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
+ rxq->rx_tail--;
+ received--;
+ }
+ rxq->rxrearm_nb += received;
+ return received;
+}
+
+/*
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ */
+uint16_t
+i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/*
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ */
+static uint16_t
+i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+ unsigned int i = 0;
+
+ if (rxq->pkt_first_seg == NULL) {
+		/* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+/*
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes.
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+ while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
+ uint16_t burst = i40e_recv_scattered_burst_vec_avx2(rx_queue,
+ rx_pkts + retval, RTE_I40E_VPMD_RX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
+ if (burst < RTE_I40E_VPMD_RX_BURST)
+ return retval;
+ }
+ return retval + i40e_recv_scattered_burst_vec_avx2(rx_queue,
+ rx_pkts + retval, nb_pkts);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_physaddr + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ const uint64_t hi_qw_tmpl = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT));
+
+	/* if unaligned on a 32-byte boundary, do one descriptor to align */
+ if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
+ vtx1(txdp, *pkt, flags);
+ nb_pkts--, txdp++, pkt++;
+ }
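+	/*
+	 * a single 16-byte descriptor write above is enough to bring txdp
+	 * back to a 32-byte boundary, so the loop below can use aligned
+	 * 256-bit stores
+	 */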
+
+	/* do four packets (two 256-bit descriptor stores) at a time while possible */
+ for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
+ uint64_t hi_qw3 = hi_qw_tmpl |
+ ((uint64_t)pkt[3]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
+ uint64_t hi_qw2 = hi_qw_tmpl |
+ ((uint64_t)pkt[2]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
+ uint64_t hi_qw1 = hi_qw_tmpl |
+ ((uint64_t)pkt[1]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
+ uint64_t hi_qw0 = hi_qw_tmpl |
+ ((uint64_t)pkt[0]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT);
+
+ __m256i desc2_3 = _mm256_set_epi64x(
+ hi_qw3, pkt[3]->buf_physaddr + pkt[3]->data_off,
+ hi_qw2, pkt[2]->buf_physaddr + pkt[2]->data_off);
+ __m256i desc0_1 = _mm256_set_epi64x(
+ hi_qw1, pkt[1]->buf_physaddr + pkt[1]->data_off,
+ hi_qw0, pkt[0]->buf_physaddr + pkt[0]->data_off);
+ _mm256_store_si256((void *)(txdp + 2), desc2_3);
+ _mm256_store_si256((void *)txdp, desc0_1);
+ }
+
+ /* do any last ones */
+ while (nb_pkts) {
+ vtx1(txdp, *pkt, flags);
+ txdp++, pkt++, nb_pkts--;
+ }
+}
+
+static inline uint16_t
+i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+
+	/* crossing the tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ vtx(txdp, tx_pkts, n - 1, flags);
+ tx_pkts += (n - 1);
+ txdp += (n - 1);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+uint16_t
+i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = i40e_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 39a6da06..3ffedcb9 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -1,40 +1,11 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef _I40E_RXTX_VEC_COMMON_H_
#define _I40E_RXTX_VEC_COMMON_H_
#include <stdint.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include "i40e_ethdev.h"
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index b5685e2b..e549d1e8 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -33,7 +33,7 @@
*/
#include <stdint.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include "base/i40e_prototype.h"
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index 9d2d1f83..3b22588c 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -1,38 +1,9 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <stdint.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include "base/i40e_prototype.h"
diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c
index 44316f64..c76760c9 100644
--- a/drivers/net/i40e/i40e_tm.c
+++ b/drivers/net/i40e/i40e_tm.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <rte_malloc.h>
diff --git a/drivers/net/i40e/meson.build b/drivers/net/i40e/meson.build
new file mode 100644
index 00000000..8764b0e5
--- /dev/null
+++ b/drivers/net/i40e/meson.build
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+cflags += ['-DPF_DRIVER',
+ '-DVF_DRIVER',
+ '-DINTEGRATED_VF',
+ '-DX722_A0_SUPPORT']
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'i40e_ethdev.c',
+ 'i40e_rxtx.c',
+ 'i40e_ethdev_vf.c',
+ 'i40e_pf.c',
+ 'i40e_fdir.c',
+ 'i40e_flow.c',
+ 'i40e_tm.c',
+ 'rte_pmd_i40e.c'
+ )
+
+deps += ['hash']
+
+if arch_subdir == 'x86'
+ dpdk_conf.set('RTE_LIBRTE_I40E_INC_VECTOR', 1)
+ sources += files('i40e_rxtx_vec_sse.c')
+
+ # compile AVX2 version if either:
+	# a. we have AVX2 supported in minimum instruction set baseline
+ # b. it's not minimum instruction set, but supported by compiler
+ if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX2')
+ sources += files('i40e_rxtx_vec_avx2.c')
+ elif cc.has_argument('-mavx2')
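+		# build the AVX2 file in a separate static library compiled
+		# with -mavx2 and link only its objects, so the rest of the
+		# PMD keeps the baseline instruction set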
+ i40e_avx2_lib = static_library('i40e_avx2_lib',
+ 'i40e_rxtx_vec_avx2.c',
+ dependencies: [static_rte_ethdev,
+ static_rte_kvargs, static_rte_hash],
+ c_args: '-mavx2')
+ objs += i40e_avx2_lib.extract_objects('i40e_rxtx_vec_avx2.c')
+ endif
+endif
+
+includes += include_directories('base')
+
+install_headers('rte_pmd_i40e.h')
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index aeb92af3..dae59e6d 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <rte_malloc.h>
@@ -1525,7 +1496,14 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
struct rte_pmd_i40e_profile_info *pinfo, *p;
uint32_t i;
int ret;
+ static const uint32_t group_mask = 0x00ff0000;
+ pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
+ sizeof(struct i40e_profile_section_header));
+ if (pinfo->track_id == 0) {
+ PMD_DRV_LOG(INFO, "Read-only profile.");
+ return 0;
+ }
buff = rte_zmalloc("pinfo_list",
(I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
0);
@@ -1544,8 +1522,6 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
return -1;
}
p_list = (struct rte_pmd_i40e_profile_list *)buff;
- pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
- sizeof(struct i40e_profile_section_header));
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
if (pinfo->track_id == p->track_id) {
@@ -1554,6 +1530,23 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
return 1;
}
}
+ for (i = 0; i < p_list->p_count; i++) {
+ p = &p_list->p_info[i];
+ if ((p->track_id & group_mask) == 0) {
+ PMD_DRV_LOG(INFO, "Profile of the group 0 exists.");
+ rte_free(buff);
+ return 2;
+ }
+ }
+ for (i = 0; i < p_list->p_count; i++) {
+ p = &p_list->p_info[i];
+ if ((pinfo->track_id & group_mask) !=
+ (p->track_id & group_mask)) {
+ PMD_DRV_LOG(INFO, "Profile of different group exists.");
+ rte_free(buff);
+ return 3;
+ }
+ }
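+	/*
+	 * return values seen by the caller: 0 no conflicting profile,
+	 * 1 the same profile exists, 2 a group-0 profile exists,
+	 * 3 a profile from a different group exists
+	 */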
rte_free(buff);
return 0;
@@ -1573,6 +1566,7 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
uint8_t *profile_info_sec;
int is_exist;
enum i40e_status_code status = I40E_SUCCESS;
+ static const uint32_t type_mask = 0xff000000;
if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
@@ -1624,6 +1618,10 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
return -EINVAL;
}
+ /* force read-only track_id for type 0 */
+ if ((track_id & type_mask) == 0)
+ track_id = 0;
+
/* Find profile segment */
profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
pkg_hdr);
@@ -1657,12 +1655,17 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
if (is_exist) {
- PMD_DRV_LOG(ERR, "Profile already exists.");
+ if (is_exist == 1)
+ PMD_DRV_LOG(ERR, "Profile already exists.");
+ else if (is_exist == 2)
+ PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
+ else if (is_exist == 3)
+				PMD_DRV_LOG(ERR, "Profile of a different group already exists.");
rte_free(profile_info_sec);
return -EEXIST;
}
} else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
- if (!is_exist) {
+ if (is_exist != 1) {
PMD_DRV_LOG(ERR, "Profile does not exist.");
rte_free(profile_info_sec);
return -EACCES;
@@ -2082,7 +2085,8 @@ static int check_invalid_pkt_type(uint32_t pkt_type)
l2 != RTE_PTYPE_L2_ETHER_LLDP &&
l2 != RTE_PTYPE_L2_ETHER_NSH &&
l2 != RTE_PTYPE_L2_ETHER_VLAN &&
- l2 != RTE_PTYPE_L2_ETHER_QINQ)
+ l2 != RTE_PTYPE_L2_ETHER_QINQ &&
+ l2 != RTE_PTYPE_L2_ETHER_PPPOE)
return -1;
if (l3 &&
@@ -2111,7 +2115,8 @@ static int check_invalid_pkt_type(uint32_t pkt_type)
tnl != RTE_PTYPE_TUNNEL_GENEVE &&
tnl != RTE_PTYPE_TUNNEL_GRENAT &&
tnl != RTE_PTYPE_TUNNEL_GTPC &&
- tnl != RTE_PTYPE_TUNNEL_GTPU)
+ tnl != RTE_PTYPE_TUNNEL_GTPU &&
+ tnl != RTE_PTYPE_TUNNEL_L2TP)
return -1;
if (il2 &&
@@ -2845,22 +2850,23 @@ i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
return 0;
}
- info->queue_region_number = 1;
- info->region[0].queue_num = main_vsi->nb_used_qps;
- info->region[0].queue_start_index = 0;
-
- ret = i40e_vsi_update_queue_region_mapping(hw, pf);
- if (ret != I40E_SUCCESS)
- PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
+ if (info->queue_region_number) {
+ info->queue_region_number = 1;
+ info->region[0].queue_num = main_vsi->nb_used_qps;
+ info->region[0].queue_start_index = 0;
- ret = i40e_dcb_init_configure(dev, TRUE);
- if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(INFO, "Failed to flush dcb.");
- pf->flags &= ~I40E_FLAG_DCB;
- }
+ ret = i40e_vsi_update_queue_region_mapping(hw, pf);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
- i40e_init_queue_region_conf(dev);
+ ret = i40e_dcb_init_configure(dev, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to flush dcb.");
+ pf->flags &= ~I40E_FLAG_DCB;
+ }
+ i40e_init_queue_region_conf(dev);
+ }
return 0;
}
@@ -2985,3 +2991,144 @@ int rte_pmd_i40e_flow_add_del_packet_template(
return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
}
+
+int
+rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
+ struct rte_pmd_i40e_inset *inset,
+ enum rte_pmd_i40e_inset_type inset_type)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ uint64_t inset_reg;
+ uint32_t mask_reg[2];
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (pctype > 63)
+ return -EINVAL;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ memset(inset, 0, sizeof(struct rte_pmd_i40e_inset));
+
+ switch (inset_type) {
+ case INSET_HASH:
+ /* Get input set */
+ inset_reg =
+ i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
+ inset_reg <<= I40E_32_BIT_WIDTH;
+ inset_reg |=
+ i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
+ /* Get field mask */
+ mask_reg[0] =
+ i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype));
+ mask_reg[1] =
+ i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype));
+ break;
+ case INSET_FDIR:
+ inset_reg =
+ i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
+ inset_reg <<= I40E_32_BIT_WIDTH;
+ inset_reg |=
+ i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
+ mask_reg[0] =
+ i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype));
+ mask_reg[1] =
+ i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype));
+ break;
+ case INSET_FDIR_FLX:
+ inset_reg =
+ i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype));
+ mask_reg[0] =
+ i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0));
+ mask_reg[1] =
+ i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported input set type.");
+ return -EINVAL;
+ }
+
+ inset->inset = inset_reg;
+
+ for (i = 0; i < 2; i++) {
+ inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F);
+ inset->mask[i].mask = mask_reg[i] & 0xFFFF;
+ }
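+	/*
+	 * each mask register read above packs a 6-bit field index in bits
+	 * 21:16 and a 16-bit field mask in bits 15:0
+	 */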
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
+ struct rte_pmd_i40e_inset *inset,
+ enum rte_pmd_i40e_inset_type inset_type)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ uint64_t inset_reg;
+ uint32_t mask_reg[2];
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (pctype > 63)
+ return -EINVAL;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Clear mask first */
+ for (i = 0; i < 2; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
+
+ inset_reg = inset->inset;
+ for (i = 0; i < 2; i++)
+ mask_reg[i] = (inset->mask[i].field_idx << 16) |
+ inset->mask[i].mask;
+
+ switch (inset_type) {
+ case INSET_HASH:
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+ for (i = 0; i < 2; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
+ break;
+ case INSET_FDIR:
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
+ for (i = 0; i < 2; i++)
+ i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
+ break;
+ case INSET_FDIR_FLX:
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ for (i = 0; i < 2; i++)
+ i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i),
+ mask_reg[i]);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported input set type.");
+ return -EINVAL;
+ }
+
+ I40E_WRITE_FLUSH(hw);
+ return 0;
+}
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
index 580ca4ae..d248adb1 100644
--- a/drivers/net/i40e/rte_pmd_i40e.h
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#ifndef _PMD_I40E_H_
@@ -42,7 +14,7 @@
*
*/
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
/**
* Response sent back to i40e driver from user app after callback
@@ -94,7 +66,7 @@ enum rte_pmd_i40e_package_info {
RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST,
RTE_PMD_I40E_PKG_INFO_PTYPE_NUM,
RTE_PMD_I40E_PKG_INFO_PTYPE_LIST,
- RTE_PMD_I40E_PKG_INFO_MAX = 0xFFFFFFFF
+ RTE_PMD_I40E_PKG_INFO_MAX = (int)0xFFFFFFFF
};
/**
@@ -317,6 +289,23 @@ struct rte_pmd_i40e_pkt_template_conf {
uint32_t soft_id;
};
+enum rte_pmd_i40e_inset_type {
+ INSET_NONE = 0,
+ INSET_HASH,
+ INSET_FDIR,
+ INSET_FDIR_FLX,
+};
+
+struct rte_pmd_i40e_inset_mask {
+ uint8_t field_idx;
+ uint16_t mask;
+};
+
+struct rte_pmd_i40e_inset {
+ uint64_t inset;
+ struct rte_pmd_i40e_inset_mask mask[2];
+};
+
/**
* Add or remove raw packet template filter to Flow Director.
*
@@ -933,4 +922,125 @@ int rte_pmd_i40e_query_vfid_by_mac(uint16_t port,
int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
enum rte_pmd_i40e_queue_region_op op_type, void *arg);
+int rte_pmd_i40e_cfg_hash_inset(uint16_t port,
+ uint64_t pctype, uint64_t inset);
+
+/**
+ * Get input set
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param pctype
+ * HW pctype.
+ * @param inset
+ * Buffer for input set info.
+ * @param inset_type
+ * Type of input set.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) if operation not supported.
+ */
+int rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
+ struct rte_pmd_i40e_inset *inset,
+ enum rte_pmd_i40e_inset_type inset_type);
+
+/**
+ * Set input set
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param pctype
+ * HW pctype.
+ * @param inset
+ * Input set info.
+ * @param inset_type
+ * Type of input set.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) if operation not supported.
+ */
+int rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
+ struct rte_pmd_i40e_inset *inset,
+ enum rte_pmd_i40e_inset_type inset_type);
+
+/**
+ * Get bit value for some field index
+ *
+ * @param inset
+ * Input set value.
+ * @param field_idx
+ * Field index for input set.
+ * @return
+ * - (1) if set.
+ * - (0) if cleared.
+ */
+static inline int
+rte_pmd_i40e_inset_field_get(uint64_t inset, uint8_t field_idx)
+{
+ uint8_t bit_idx;
+
+ if (field_idx > 63)
+ return 0;
+
+ bit_idx = 63 - field_idx;
+ if (inset & (1ULL << bit_idx))
+ return 1;
+
+ return 0;
+}
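+/*
+ * Field index 0 maps to bit 63 of the 64-bit input set value, so e.g.
+ * rte_pmd_i40e_inset_field_get(inset, 0) tests the most significant bit.
+ */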
+
+/**
+ * Set bit value for some field index
+ *
+ * @param inset
+ * Input set value.
+ * @param field_idx
+ * Field index for input set.
+ * @return
+ * - (-1) if failed.
+ * - (0) if success.
+ */
+static inline int
+rte_pmd_i40e_inset_field_set(uint64_t *inset, uint8_t field_idx)
+{
+ uint8_t bit_idx;
+
+ if (field_idx > 63)
+ return -1;
+
+ bit_idx = 63 - field_idx;
+ *inset = *inset | (1ULL << bit_idx);
+
+ return 0;
+}
+
+/**
+ * Clear bit value for some field index
+ *
+ * @param inset
+ * Input set value.
+ * @param field_idx
+ * Field index for input set.
+ * @return
+ * - (-1) if failed.
+ * - (0) if success.
+ */
+static inline int
+rte_pmd_i40e_inset_field_clear(uint64_t *inset, uint8_t field_idx)
+{
+ uint8_t bit_idx;
+
+ if (field_idx > 63)
+ return -1;
+
+ bit_idx = 63 - field_idx;
+ *inset = *inset & ~(1ULL << bit_idx);
+
+ return 0;
+}
+
#endif /* _PMD_I40E_H_ */
diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map
index ebbd24e0..cccd5768 100644
--- a/drivers/net/i40e/rte_pmd_i40e_version.map
+++ b/drivers/net/i40e/rte_pmd_i40e_version.map
@@ -58,3 +58,10 @@ DPDK_17.11 {
rte_pmd_i40e_rss_queue_region_conf;
} DPDK_17.08;
+
+DPDK_18.02 {
+ global:
+
+ rte_pmd_i40e_inset_get;
+ rte_pmd_i40e_inset_set;
} DPDK_17.11;
\ No newline at end of file
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 511a64eb..d0804fc5 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2016 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
@@ -36,6 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_ixgbe.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index 8c833b44..70fdfe7c 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -34,7 +34,7 @@ Intel® IXGBE driver
===================
This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2017.05.16 released by the team which develop
+cid-ixgbe.2018.01.02.tar.gz released by the team which develops
basic drivers for any ixgbe NIC. The sub-directory of base/
contains the original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c b/drivers/net/ixgbe/base/ixgbe_82598.c
index d64abb2e..ee7ce2e9 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.c
+++ b/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -548,6 +548,7 @@ out:
/**
* ixgbe_start_mac_link_82598 - Configures MAC link settings
* @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
@@ -1205,7 +1206,7 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
- * @eeprom_data: value read
+ * @sff8472_data: value read
*
* Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
**/
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index d9d11a8e..26217212 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -87,6 +87,9 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
mac->ops.set_rate_select_speed =
ixgbe_set_hard_rate_select_speed;
+ if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_soft_rate_select_speed;
} else {
if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
(hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -265,7 +268,7 @@ s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
/**
* prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
* @hw: pointer to hardware structure
- * @reg_val: value to write to AUTOC
+ * @autoc: value to write to AUTOC
* @locked: bool to indicate whether the SW/FW lock was already taken by
* previous proc_autoc_read_82599.
*
@@ -564,6 +567,10 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_QSFP_SF_QP:
media_type = ixgbe_media_type_fiber_qsfp;
break;
+ case IXGBE_DEV_ID_82599_BYPASS:
+ media_type = ixgbe_media_type_fiber_fixed;
+ hw->phy.multispeed_fiber = true;
+ break;
default:
media_type = ixgbe_media_type_unknown;
break;
@@ -1367,6 +1374,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
bool cloud_mode)
{
+ UNREFERENCED_1PARAMETER(cloud_mode);
DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
/*
@@ -1455,7 +1463,8 @@ do { \
/**
* ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
- * @stream: input bitstream to compute the hash on
+ * @input: input bitstream to compute the hash on
+ * @common: compressed common input dword
*
* This function is almost identical to the function above but contains
* several optimizations such as unwinding all of the loops, letting the
@@ -1594,7 +1603,7 @@ do { \
/**
* ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
- * @atr_input: input bitstream to compute the hash on
+ * @input: input bitstream to compute the hash on
* @input_mask: mask for the input bitstream
*
* This function serves two main purposes. First it applies the input_mask
@@ -1695,6 +1704,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
u32 fdirm = IXGBE_FDIRM_DIPv6;
u32 fdirtcpm;
u32 fdirip6m;
+ UNREFERENCED_1PARAMETER(cloud_mode);
DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
/*
@@ -1739,15 +1749,17 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
case 0x0000:
- /* mask VLAN ID, fall through to mask VLAN priority */
+ /* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
+ /* fall through */
case 0x0FFF:
/* mask VLAN priority */
fdirm |= IXGBE_FDIRM_VLANP;
break;
case 0xE000:
- /* mask VLAN ID only, fall through */
+ /* mask VLAN ID only */
fdirm |= IXGBE_FDIRM_VLANID;
+ /* fall through */
case 0xEFFF:
/* no VLAN fields masked */
break;
@@ -1758,8 +1770,9 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
switch (input_mask->formatted.flex_bytes & 0xFFFF) {
case 0x0000:
- /* Mask Flex Bytes, fall through */
+ /* Mask Flex Bytes */
fdirm |= IXGBE_FDIRM_FLEX;
+ /* fall through */
case 0xFFFF:
break;
default:
@@ -1868,6 +1881,7 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
u32 addr_low, addr_high;
u32 cloud_type = 0;
s32 err;
+ UNREFERENCED_1PARAMETER(cloud_mode);
DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
if (!cloud_mode) {
@@ -1992,6 +2006,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
* @input_mask: mask for the input bitstream
* @soft_id: software index for the filters
* @queue: queue index to direct traffic to
+ * @cloud_mode: unused
*
* Note that the caller to this function must lock before calling, since the
* hardware writes must be protected from one another.
@@ -2002,6 +2017,7 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id, u8 queue, bool cloud_mode)
{
s32 err = IXGBE_ERR_CONFIG;
+ UNREFERENCED_1PARAMETER(cloud_mode);
DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
@@ -2024,6 +2040,7 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
DEBUGOUT(" Error on src/dst port\n");
return IXGBE_ERR_CONFIG;
}
+ /* fall through */
case IXGBE_ATR_FLOW_TYPE_TCPV4:
case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
case IXGBE_ATR_FLOW_TYPE_UDPV4:
@@ -2510,6 +2527,7 @@ reset_pipeline_out:
* ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -2567,6 +2585,7 @@ release_i2c_access:
* ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to read from
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
index 4117fb01..e50c1045 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.c
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -178,6 +178,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599EN_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_LS:
+ case IXGBE_DEV_ID_82599_BYPASS:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
@@ -192,6 +193,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X540_BYPASS:
hw->mac.type = ixgbe_mac_X540;
hw->mvals = ixgbe_mvals_X540;
break;
@@ -549,6 +551,7 @@ s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
* ixgbe_read_phy_reg - Read PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: type of device you want to communicate with
* @phy_data: Pointer to read data from PHY register
*
* Reads a value from a specified PHY register
@@ -567,6 +570,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* ixgbe_write_phy_reg - Write PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
+ * @device_type: type of device you want to communicate with
* @phy_data: Data to write to the PHY register
*
* Writes a value to specified PHY register
@@ -610,6 +614,8 @@ s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw)
/**
* ixgbe_check_phy_link - Determine link and speed status
* @hw: pointer to hardware structure
+ * @speed: link speed
+ * @link_up: true when link is up
*
* Reads a PHY register to determine if link is up and the current speed for
* the PHY.
@@ -625,6 +631,7 @@ s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* ixgbe_setup_phy_link_speed - Set auto advertise
* @hw: pointer to hardware structure
* @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Sets the auto advertised capabilities
**/
@@ -650,6 +657,9 @@ s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on)
/**
* ixgbe_check_link - Get link and speed status
* @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
*
* Reads the links register to determine if link is up and the current speed
**/
@@ -703,6 +713,7 @@ void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
* ixgbe_setup_link - Set link speed
* @hw: pointer to hardware structure
* @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Configures link settings. Restarts the link.
* Performs autonegotiation if needed.
@@ -719,6 +730,7 @@ s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* ixgbe_setup_mac_link - Set link speed
* @hw: pointer to hardware structure
* @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Configures link settings. Restarts the link.
* Performs autonegotiation if needed.
@@ -734,6 +746,8 @@ s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/**
* ixgbe_get_link_capabilities - Returns link capabilities
* @hw: pointer to hardware structure
+ * @speed: link speed capabilities
+ * @autoneg: true when autoneg or autotry is enabled
*
* Determines the link capabilities of the current configuration.
**/
@@ -786,6 +800,7 @@ s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
/**
* ixgbe_blink_led_stop - Stop blinking LEDs
* @hw: pointer to hardware structure
+ * @index: led number to stop
*
* Stop blinking LED based on index.
**/
@@ -1028,6 +1043,7 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
* @mc_addr_list: the list of new multicast addresses
* @mc_addr_count: number of addresses
* @func: iterator function to walk the multicast address list
+ * @clear: flag, when set clears the table beforehand
*
* The given list replaces any existing list. Clears the MC addrs from receive
* address registers and the multicast table. Uses unused receive address
@@ -1241,7 +1257,7 @@ s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee)
/**
* ixgbe_set_source_address_pruning - Enable/Disable source address pruning
* @hw: pointer to hardware structure
- * @enbale: enable or disable source address pruning
+ * @enable: enable or disable source address pruning
* @pool: Rx pool - Rx pool to toggle source address pruning
**/
void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index 7f85713e..e7e9256e 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -167,6 +167,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
/* flow control autoneg black list */
@@ -200,6 +201,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X540_BYPASS:
case IXGBE_DEV_ID_X550T:
case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
@@ -264,7 +266,8 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
if (ret_val != IXGBE_SUCCESS)
goto out;
- /* only backplane uses autoc so fall though */
+ /* fall through - only backplane uses autoc */
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -2093,6 +2096,7 @@ STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
/**
* ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
* @hw: pointer to hardware structure
+ * @count: number of bits to shift
**/
STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
@@ -2151,7 +2155,7 @@ STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
/**
* ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
* @hw: pointer to hardware structure
- * @eecd: EECD's current value
+ * @eec: EEC's current value
**/
STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
@@ -2531,6 +2535,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
* ixgbe_add_uc_addr - Adds a secondary unicast address.
* @hw: pointer to hardware structure
* @addr: new address
+ * @vmdq: VMDq "set" or "pool" index
*
* Adds it to unused receive address register or goes into promiscuous mode.
**/
@@ -2675,7 +2680,7 @@ STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
/**
* ixgbe_set_mta - Set bit-vector in multicast table
* @hw: pointer to hardware structure
- * @hash_value: Multicast address hash value
+ * @mc_addr: Multicast address
*
* Sets the bit-vector in the multicast table.
**/
@@ -3122,6 +3127,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
@@ -3347,7 +3353,7 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
**/
s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
{
-#define IXGBE_MAX_SECRX_POLL 40
+#define IXGBE_MAX_SECRX_POLL 4000
int i;
int secrxreg;
@@ -3364,7 +3370,7 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
break;
else
/* Use interrupt-safe sleep just in case */
- usec_delay(1000);
+ usec_delay(10);
}
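(Editor's note: the two changes above keep the overall poll budget identical while tightening the granularity. A quick self-contained check, using only the constants visible in the hunk:)

#include <assert.h>

int main(void)
{
	unsigned old_budget_us = 40 * 1000;  /* 40 polls x 1000 us sleep */
	unsigned new_budget_us = 4000 * 10;  /* 4000 polls x 10 us sleep */

	assert(old_budget_us == new_budget_us); /* both are 40 ms total */
	return 0;
}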
/* For informational purposes only */
@@ -3378,6 +3384,7 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
/**
* prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
* @hw: pointer to hardware structure
+ * @locked: bool to indicate whether the SW/FW lock was taken
* @reg_val: Value we read from AUTOC
*
 * The default case requires no protection, so just do the register read.
@@ -3905,6 +3912,9 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
+ * @vlvf_bypass: true to find the VLAN id only; false also returns the first
+ *		 empty slot if the VLAN id is not found
*
* return the VLVF index where this VLAN id should be placed
*
@@ -4246,10 +4256,17 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_10_X550EM_A:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
+#ifdef PREBOOT_SUPPORT
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
- hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
*speed = IXGBE_LINK_SPEED_10_FULL;
- }
+#else
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+#endif /* PREBOOT_SUPPORT */
break;
default:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -4577,10 +4594,11 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length, u32 timeout, bool return_data)
{
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
- u16 dword_len;
+ struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
u16 buf_len;
s32 status;
u32 bi;
+ u32 dword_len;
DEBUGFUNC("ixgbe_host_interface_command");
@@ -4607,11 +4625,26 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* first pull in the header so we know the buffer length */
for (bi = 0; bi < dword_len; bi++) {
buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
}
- /* If there is any thing in data position pull it in */
- buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
+ /*
+	 * If there is anything in the data position, pull it in.
+	 * The Read Flash command requires reading the buffer length
+	 * from two bytes instead of one byte
+ */
+ if (resp->cmd == 0x30) {
+ for (; bi < dword_len + 2; bi++) {
+ buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ bi);
+ IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ }
+ buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
+ & 0xF00) | resp->buf_len;
+ hdr_size += (2 << 2);
+ } else {
+ buf_len = resp->buf_len;
+ }
if (!buf_len)
goto rel_out;
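(Editor's note: the two-byte length decode above can be read as a small helper. This is an illustrative sketch, not code from the patch, and the sample values are hypothetical:)

#include <assert.h>
#include <stdint.h>

/* Sketch of the Read Flash (cmd 0x30) response length decode: bits 11:8
 * come from the ret_status byte, bits 7:0 from the buf_len byte. */
static uint16_t hic_read_flash_buf_len(uint8_t ret_status, uint8_t buf_len)
{
	return (((uint16_t)ret_status << 3) & 0xF00) | buf_len;
}

int main(void)
{
	/* hypothetical sample: ret_status 0x40 contributes 0x200 */
	assert(hic_read_flash_buf_len(0x40, 0x34) == 0x234);
	return 0;
}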
@@ -4627,7 +4660,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* Pull in the rest of the buffer (bi is where we left off) */
for (; bi <= dword_len; bi++) {
buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- IXGBE_LE32_TO_CPUS(&buffer[bi]);
+ IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
}
rel_out:
@@ -4643,6 +4676,8 @@ rel_out:
* @min: driver version minor number
* @build: driver version build number
* @sub: driver version sub build number
+ * @len: unused
+ * @driver_ver: unused
*
* Sends driver version number to firmware through the manageability
* block. On success return IXGBE_SUCCESS
@@ -4669,10 +4704,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
fw_cmd.ver_build = build;
fw_cmd.ver_sub = sub;
fw_cmd.hdr.checksum = 0;
- fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
- (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
fw_cmd.pad = 0;
fw_cmd.pad2 = 0;
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
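(Editor's note: the reorder above matters because the checksum covers the whole command buffer, pad fields included, so the pads must be zeroed before the checksum is computed. A minimal sketch of the presumed ixgbe_calculate_checksum semantics — a byte-wise sum whose complement makes the buffer sum to zero; treat this as an assumption, not the verbatim implementation:)

#include <assert.h>
#include <stdint.h>

static uint8_t calculate_checksum_sketch(const uint8_t *buffer, uint32_t length)
{
	uint32_t i;
	uint8_t sum = 0;

	for (i = 0; i < length; i++)
		sum += buffer[i];          /* the sum covers pad bytes too */
	return (uint8_t)(0 - sum);
}

int main(void)
{
	uint8_t buf[4] = { 0x10, 0x20, 0x30, 0x00 }; /* last byte = checksum slot */

	buf[3] = calculate_checksum_sketch(buf, 4);
	/* with the checksum in place the whole buffer sums to zero */
	assert((uint8_t)(buf[0] + buf[1] + buf[2] + buf[3]) == 0);
	return 0;
}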
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
@@ -4727,7 +4762,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
for (; i < (num_pb / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- /* Fall through to configure remaining packet buffers */
+ /* fall through - configure remaining packet buffers */
case PBA_STRATEGY_EQUAL:
rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
for (; i < num_pb; i++)
@@ -4830,7 +4865,6 @@ STATIC const u8 ixgbe_emc_therm_limit[4] = {
/**
* ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
* @hw: pointer to hardware structure
- * @data: pointer to the thermal sensor data structure
*
* Returns the thermal sensor data structure
**/
@@ -4983,6 +5017,117 @@ eeprom_err:
return IXGBE_NOT_IMPLEMENTED;
}
+/**
+ * ixgbe_get_orom_version - Return option ROM from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid option ROM version is found, nvm_ver->or_valid is set to true;
+ * otherwise nvm_ver->or_valid is false.
+ **/
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
+
+ nvm_ver->or_valid = false;
+	/* Option ROM may or may not be present. Start with the pointer */
+ hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
+
+ /* make sure offset is valid */
+ if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+ /* option rom exists and is valid */
+ if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+ eeprom_cfg_blkl == NVM_VER_INVALID ||
+ eeprom_cfg_blkh == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->or_valid = true;
+ nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+ nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+ (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+ nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
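(Editor's note: a worked example of the option ROM decode above, with hypothetical EEPROM words; note the shift into a 16-bit field truncates the high byte of eeprom_cfg_blkl:)

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t blkl = 0x0102, blkh = 0x0304;    /* hypothetical words */
	uint8_t  or_major = blkl >> 8;                           /* 0x01 */
	uint16_t or_build = (uint16_t)(blkl << 8) | (blkh >> 8); /* 0x0203 */
	uint8_t  or_patch = blkh & 0xFF;                         /* 0x04 */

	assert(or_major == 0x01 && or_build == 0x0203 && or_patch == 0x04);
	return 0;
}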
+
+/**
+ * ixgbe_get_oem_prod_version - Return OEM Product version
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * If a valid OEM product version is found, nvm_ver->oem_valid is set to true;
+ * otherwise nvm_ver->oem_valid is false.
+ **/
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 rel_num, prod_ver, mod_len, cap, offset;
+
+ nvm_ver->oem_valid = false;
+ hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+	/* Return if the offset to the OEM Product Version block is invalid */
+	if (offset == 0x0 || offset == NVM_INVALID_PTR)
+ return;
+
+ /* Read product version block */
+ hw->eeprom.ops.read(hw, offset, &mod_len);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+ /* Return if OEM product version block is invalid */
+ if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+ (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+ return;
+
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+ /* Return if version is invalid */
+ if ((rel_num | prod_ver) == 0x0 ||
+ rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+ return;
+
+ nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+ nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+ nvm_ver->oem_release = rel_num;
+ nvm_ver->oem_valid = true;
+}
+
+/**
+ * ixgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * word read errors will return 0xFFFF
+ **/
+void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 etk_id_l, etk_id_h;
+
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+ etk_id_l = NVM_VER_INVALID;
+ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
+ etk_id_h = NVM_VER_INVALID;
+
+ /* The word order for the version format is determined by high order
+ * word bit 15.
+ */
+ if ((etk_id_h & NVM_ETK_VALID) == 0) {
+ nvm_ver->etk_id = etk_id_h;
+ nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
+ } else {
+ nvm_ver->etk_id = etk_id_l;
+ nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
+ }
+}
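(Editor's note: a worked example of the Etrack word-order rule above, again with hypothetical words; bit 15 of the word at the high offset — NVM_ETK_VALID, 0x8000 — selects which word supplies the high half:)

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t etk_id_l = 0x1234, etk_id_h = 0x8765; /* hypothetical */
	uint32_t etk_id;

	if ((etk_id_h & 0x8000) == 0)                  /* valid bit clear */
		etk_id = etk_id_h | ((uint32_t)etk_id_l << 16);
	else                                           /* valid bit set */
		etk_id = etk_id_l | ((uint32_t)etk_id_h << 16);

	assert(etk_id == 0x87651234);
	return 0;
}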
+
/**
* ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
@@ -5054,8 +5199,8 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
return false;
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
- fwsm &= IXGBE_FWSM_MODE_MASK;
- return fwsm == IXGBE_FWSM_FW_MODE_PT;
+
+ return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
}
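(Editor's note: the behavioural change above is subtle — the old code required the FWSM mode field to equal pass-through mode exactly, while the new code only requires the pass-through bit to be set. A small demonstration assuming the usual definitions, mode mask 0xE and pass-through mode 0x4; these values are an assumption, check ixgbe_type.h:)

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t fwsm = 0x6; /* PT bit (0x4) set plus another mode bit */

	assert((fwsm & 0xE) != 0x4); /* old check: exact match -> false */
	assert(!!(fwsm & 0x4));      /* new check: PT bit set  -> true  */
	return 0;
}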
/**
@@ -5122,6 +5267,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
ixgbe_set_rate_select_speed(hw,
IXGBE_LINK_SPEED_10GB_FULL);
@@ -5172,6 +5318,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
ixgbe_set_rate_select_speed(hw,
IXGBE_LINK_SPEED_1GB_FULL);
diff --git a/drivers/net/ixgbe/base/ixgbe_common.h b/drivers/net/ixgbe/base/ixgbe_common.h
index 903f34d5..fd35dcc4 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.h
+++ b/drivers/net/ixgbe/base/ixgbe_common.h
@@ -183,6 +183,12 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+
+void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb.c b/drivers/net/ixgbe/base/ixgbe_dcb.c
index 9a6a5085..2877f22b 100644
--- a/drivers/net/ixgbe/base/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/base/ixgbe_dcb.c
@@ -43,6 +43,10 @@ POSSIBILITY OF SUCH DAMAGE.
* are the smallest unit programmable into the underlying
 * hardware. The IEEE 802.1Qaz specification does not use bandwidth
* groups so this is much simplified from the CEE case.
+ * @bw: bandwidth indexed by traffic class
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @max_frame_size: maximum frame size
*/
s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
int max_frame_size)
@@ -77,8 +81,10 @@ s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
/**
* ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
- * @ixgbe_dcb_config: Struct containing DCB settings.
- * @direction: Configuring either Tx or Rx.
+ * @hw: pointer to hardware structure
+ * @dcb_config: Struct containing DCB settings
+ * @max_frame_size: Maximum frame size
+ * @direction: Configuring either Tx or Rx
*
* This function calculates the credits allocated to each traffic class.
* It should be called only after the rules are checked by
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82598.c b/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
index 7ff7beb4..3ed8337b 100644
--- a/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/base/ixgbe_dcb_82598.c
@@ -110,7 +110,9 @@ s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
@@ -165,7 +167,10 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
/**
* ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
@@ -209,7 +214,10 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
@@ -254,7 +262,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_pfc_82598 - Config priority flow control
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control for each traffic class.
*/
@@ -338,7 +346,11 @@ s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82598 - Config and enable DCB
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @link_speed: unused
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
*
* Configure dcb settings and enable dcb mode.
*/
diff --git a/drivers/net/ixgbe/base/ixgbe_dcb_82599.c b/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
index a52f83a0..8f9e1590 100644
--- a/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_dcb_82599.c
@@ -109,7 +109,11 @@ s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
@@ -168,7 +172,10 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
/**
* ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
@@ -213,7 +220,11 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
/**
* ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
@@ -361,6 +372,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
/**
* ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
* @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
*
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
@@ -571,7 +583,12 @@ s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @link_speed: unused
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @tsa: transmission selection algorithm indexed by traffic class
+ * @map: priority to tc assignments indexed by priority
*
* Configure dcb settings and enable dcb mode.
*/
diff --git a/drivers/net/ixgbe/base/ixgbe_hv_vf.c b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
index 47143a26..40dad775 100644
--- a/drivers/net/ixgbe/base/ixgbe_hv_vf.c
+++ b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
@@ -36,6 +36,11 @@ POSSIBILITY OF SUCH DAMAGE.
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @mc_addr_list: unused
+ * @mc_addr_count: unused
+ * @next: unused
+ * @clear: unused
*/
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr next,
@@ -48,6 +53,8 @@ static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_li
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @xcast_mode: unused
*/
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
@@ -58,6 +65,11 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
/**
* Hyper-V variant - just a stub.
+ * @hw: unused
+ * @vlan: unused
+ * @vind: unused
+ * @vlan_on: unused
+ * @vlvf_bypass: unused
*/
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
@@ -96,6 +108,11 @@ static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 v
/**
* Hyper-V variant; there is no mailbox communication.
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: unused
+ *
*/
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.c b/drivers/net/ixgbe/base/ixgbe_mbx.c
index 042e5cc1..2785bbad 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.c
@@ -444,17 +444,6 @@ STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
for (i = 0; i < size; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
- /*
- * Complete the remaining mailbox data registers with zero to reset
- * the data sent in a previous exchange (in either side) with the PF,
- * including exchanges performed by another Guest OS to which that VF
- * was previously assigned.
- */
- while (i < hw->mbx.size) {
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, 0);
- i++;
- }
-
/* update stats */
hw->mbx.stats.msgs_tx++;
@@ -693,17 +682,6 @@ STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
for (i = 0; i < size; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
- /*
- * Complete the remaining mailbox data registers with zero to reset
- * the data sent in a previous exchange (in either side) with the VF,
- * including exchanges performed by another Guest OS to which that VF
- * was previously assigned.
- */
- while (i < hw->mbx.size) {
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, 0);
- i++;
- }
-
/* Interrupt VF to tell it a message has been sent and release buffer*/
IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
index 62c3080a..2df068ee 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.c
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -86,8 +86,8 @@ STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
/**
* ixgbe_ones_comp_byte_add - Perform one's complement addition
- * @add1 - addend 1
- * @add2 - addend 2
+ * @add1: addend 1
+ * @add2: addend 2
*
* Returns one's complement 8-bit sum.
*/
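(Editor's note: one's complement byte addition folds the carry back into the low byte. A minimal sketch under that assumption — the patch itself only touches the parameter doc style, not the function body:)

#include <assert.h>
#include <stdint.h>

static uint8_t ones_comp_byte_add_sketch(uint8_t add1, uint8_t add2)
{
	uint16_t sum = (uint16_t)add1 + add2;

	sum = (sum & 0xFF) + (sum >> 8); /* fold the end-around carry */
	return (uint8_t)sum;
}

int main(void)
{
	assert(ones_comp_byte_add_sketch(0xFF, 0x01) == 0x01);
	return 0;
}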
@@ -398,6 +398,7 @@ s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
/**
 * ixgbe_validate_phy_addr - Determines whether a PHY address is valid
* @hw: pointer to hardware structure
+ * @phy_addr: PHY address
*
**/
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
@@ -575,6 +576,7 @@ out:
* the SWFW lock
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
@@ -656,6 +658,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* using the SWFW lock - this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -872,6 +875,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
+ * @autoneg_wait_to_complete: unused
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -977,6 +981,8 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
/**
* ixgbe_check_phy_link_tnx - Determine link and speed status
* @hw: pointer to hardware structure
+ * @speed: current link speed
+ * @link_up: true if link is up, false otherwise
*
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
@@ -1941,7 +1947,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: byte offset at address 0xA2
- * @eeprom_data: value read
+ * @sff8472_data: value read
*
* Performs byte read operation to SFP module's SFF-8472 data over I2C
**/
@@ -1990,6 +1996,7 @@ STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
* ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
+ * @lock: true to take and release the semaphore
*
@@ -2081,6 +2088,7 @@ fail:
* ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -2097,6 +2105,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -2113,6 +2122,7 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to write to
* @data: value to write
+ * @lock: true to take and release the semaphore
*
@@ -2184,6 +2194,7 @@ fail:
* ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
@@ -2200,6 +2211,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
@@ -2582,6 +2594,7 @@ STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
{
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
bool data;
+ UNREFERENCED_1PARAMETER(hw);
DEBUGFUNC("ixgbe_get_i2c_data");
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h
index bda85589..6e03089e 100644
--- a/drivers/net/ixgbe/base/ixgbe_type.h
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -123,9 +123,11 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_82599_VF_HV 0x152E
#define IXGBE_DEV_ID_82599_LS 0x154F
+#define IXGBE_DEV_ID_82599_BYPASS 0x155D
#define IXGBE_DEV_ID_X540T 0x1528
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X540_BYPASS 0x155C
#define IXGBE_DEV_ID_X540T1 0x1560
#define IXGBE_DEV_ID_X550T 0x1563
#define IXGBE_DEV_ID_X550T1 0x15D1
@@ -271,7 +273,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_I2C_BB_EN_X550 0x00000100
#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550
#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550
-
#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN)
#define IXGBE_I2C_CLK_OE_N_EN 0
@@ -303,6 +304,47 @@ struct ixgbe_thermal_sensor_data {
struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
};
+
+#define NVM_OROM_OFFSET 0x17
+#define NVM_OROM_BLK_LOW 0x83
+#define NVM_OROM_BLK_HI 0x84
+#define NVM_OROM_PATCH_MASK 0xFF
+#define NVM_OROM_SHIFT 8
+
+#define NVM_VER_MASK 0x00FF /* version mask */
+#define NVM_VER_SHIFT 8 /* version bit shift */
+#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
+#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
+#define NVM_ETK_OFF_HI 0x2E /* version high order word */
+#define NVM_ETK_SHIFT 16 /* high version word shift */
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETK_VALID 0x8000
+#define NVM_INVALID_PTR 0xFFFF
+#define NVM_VER_SIZE		32	/* version string size */
+
+struct ixgbe_nvm_version {
+ u32 etk_id;
+ u8 nvm_major;
+ u16 nvm_minor;
+ u8 nvm_id;
+
+ bool oem_valid;
+ u8 oem_major;
+ u8 oem_minor;
+ u16 oem_release;
+
+ bool or_valid;
+ u8 or_major;
+ u16 or_build;
+ u8 or_patch;
+
+};
+
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
#define IXGBE_EICS 0x00808
@@ -570,7 +612,6 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */
#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */
#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */
-
#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16
#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
@@ -580,7 +621,6 @@ struct ixgbe_thermal_sensor_data {
/* Four Flexible Filters are supported */
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
-
/* Six Flexible Filters are supported */
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6
/* Eight Flexible Filters are supported */
@@ -728,8 +768,6 @@ struct ixgbe_dmac_config {
#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */
#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */
-
-
/* Security Control Registers */
#define IXGBE_SECTXCTRL 0x08800
#define IXGBE_SECTXSTAT 0x08804
@@ -867,7 +905,6 @@ struct ixgbe_dmac_config {
#define IXGBE_RTTBCNRTT 0x05150
#define IXGBE_RTTBCNRD 0x0498C
-
/* FCoE DMA Context Registers */
/* FCoE Direct DMA Context */
#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10))
@@ -2422,6 +2459,16 @@ enum {
#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
#define IXGBE_FW_LESM_STATE_1 0x1
#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
+#define IXGBE_FW_LESM_2_STATES_ENABLED_MASK 0x1F
+#define IXGBE_FW_LESM_2_STATES_ENABLED 0x12
+#define IXGBE_FW_LESM_STATE0_10G_ENABLED 0x6FFF
+#define IXGBE_FW_LESM_STATE1_10G_ENABLED 0x4FFF
+#define IXGBE_FW_LESM_STATE0_10G_DISABLED 0x0FFF
+#define IXGBE_FW_LESM_STATE1_10G_DISABLED 0x2FFF
+#define IXGBE_FW_LESM_PORT0_STATE0_OFFSET 0x2
+#define IXGBE_FW_LESM_PORT0_STATE1_OFFSET 0x3
+#define IXGBE_FW_LESM_PORT1_STATE0_OFFSET 0x6
+#define IXGBE_FW_LESM_PORT1_STATE1_OFFSET 0x7
#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
#define IXGBE_FW_PATCH_VERSION_4 0x7
#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
@@ -3708,6 +3755,7 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
+ ixgbe_media_type_fiber_fixed,
ixgbe_media_type_fiber_qsfp,
ixgbe_media_type_fiber_lco,
ixgbe_media_type_copper,
@@ -4222,7 +4270,6 @@ struct ixgbe_hw {
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
-
#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
#define IXGBE_FUSES0_300MHZ (1 << 5)
#define IXGBE_FUSES0_REV_MASK (3 << 6)
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.c b/drivers/net/ixgbe/base/ixgbe_vf.c
index b513190a..5b25a6b4 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.c
+++ b/drivers/net/ixgbe/base/ixgbe_vf.c
@@ -376,6 +376,7 @@ s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
* @next: caller supplied function to return next address in list
+ * @clear: unused
*
* Updates the Multicast Table Array.
**/
@@ -509,8 +510,9 @@ u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
}
/**
- * ixgbe_get_mac_addr_vf - Read device MAC address
- * @hw: pointer to the HW structure
+ * ixgbe_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: the MAC address
**/
s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
@@ -556,7 +558,6 @@ s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
* ixgbe_setup_mac_link_vf - Setup MAC link settings
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c b/drivers/net/ixgbe/base/ixgbe_x540.c
index 0e51813b..716664bb 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.c
+++ b/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -779,6 +779,9 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
swmask |= swi2c_mask;
fwmask |= swi2c_mask << 2;
+ if (hw->mac.type >= ixgbe_mac_X550)
+ timeout = 1000;
+
for (i = 0; i < timeout; i++) {
/* SW NVM semaphore bit is used for access to all
* SW_FW_SYNC bits (not just NVM)
@@ -804,14 +807,6 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
msec_delay(5);
}
- /* Failed to get SW only semaphore */
- if (swmask == IXGBE_GSSR_SW_MNG_SM) {
- ERROR_REPORT1(IXGBE_ERROR_POLLING,
- "Failed to get SW only semaphore");
- DEBUGOUT("Failed to get SW only semaphore, returning IXGBE_ERR_SWFW_SYNC\n");
- return IXGBE_ERR_SWFW_SYNC;
- }
-
/* If the resource is not released by the FW/HW the SW can assume that
* the FW/HW malfunctions. In that case the SW should set the SW bit(s)
* of the requested resource(s) while ignoring the corresponding FW/HW
@@ -836,7 +831,8 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
*/
if (swfw_sync & swmask) {
u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
- IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+ IXGBE_GSSR_SW_MNG_SM;
if (swi2c_mask)
rmask |= IXGBE_GSSR_I2C_MASK;
@@ -970,14 +966,25 @@ STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
**/
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
{
+ u32 rmask;
+
/* First try to grab the semaphore but we don't need to bother
- * looking to see whether we got the lock or not since we do
+ * looking to see whether we got the lock or not since we do
* the same thing regardless of whether we got the lock or not.
* We got the lock - we release it.
* We timeout trying to get the lock - we force its release.
*/
ixgbe_get_swfw_sync_semaphore(hw);
ixgbe_release_swfw_sync_semaphore(hw);
+
+ /* Acquire and release all software resources. */
+ rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+ IXGBE_GSSR_SW_MNG_SM;
+
+ rmask |= IXGBE_GSSR_I2C_MASK;
+ ixgbe_acquire_swfw_sync_X540(hw, rmask);
+ ixgbe_release_swfw_sync_X540(hw, rmask);
}
/**
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c
index 9862391b..f66f5407 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -336,98 +336,6 @@ STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
}
/**
- * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit address of PHY register to read
- * @dev_type: always unused
- * @phy_data: Pointer to read data from PHY register
- */
-STATIC s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
- u32 dev_type, u16 *phy_data)
-{
- u32 i, data, command;
- UNREFERENCED_1PARAMETER(dev_type);
-
- /* Setup and write the read command */
- command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
- IXGBE_MSCA_MDI_COMMAND;
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /* Check every 10 usec to see if the access completed.
- * The MDI Command bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- usec_delay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
- if (!(command & IXGBE_MSCA_MDI_COMMAND))
- break;
- }
-
- if (command & IXGBE_MSCA_MDI_COMMAND) {
- ERROR_REPORT1(IXGBE_ERROR_POLLING,
- "PHY read command did not complete.\n");
- return IXGBE_ERR_PHY;
- }
-
- /* Read operation is complete. Get the data from MSRWD */
- data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
- data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
- *phy_data = (u16)data;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @dev_type: always unused
- * @phy_data: Data to write to the PHY register
- */
-STATIC s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
- u32 dev_type, u16 phy_data)
-{
- u32 i, command;
- UNREFERENCED_1PARAMETER(dev_type);
-
- /* Put the data in the MDI single read and write data register*/
- IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
-
- /* Setup and write the write command */
- command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
- IXGBE_MSCA_MDI_COMMAND;
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /* Check every 10 usec to see if the access completed.
- * The MDI Command bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- usec_delay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
- if (!(command & IXGBE_MSCA_MDI_COMMAND))
- break;
- }
-
- if (command & IXGBE_MSCA_MDI_COMMAND) {
- ERROR_REPORT1(IXGBE_ERROR_POLLING,
- "PHY write cmd didn't complete\n");
- return IXGBE_ERR_PHY;
- }
-
- return IXGBE_SUCCESS;
-}
-
-/**
* ixgbe_identify_phy_x550em - Get PHY type based on device id
* @hw: pointer to hardware structure
*
@@ -467,14 +375,10 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
return ixgbe_identify_phy_generic(hw);
case IXGBE_DEV_ID_X550EM_X_1G_T:
hw->phy.type = ixgbe_phy_ext_1g_t;
- hw->phy.ops.read_reg = NULL;
- hw->phy.ops.write_reg = NULL;
break;
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
hw->phy.type = ixgbe_phy_fw;
- hw->phy.ops.read_reg = NULL;
- hw->phy.ops.write_reg = NULL;
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
else
@@ -1764,9 +1668,12 @@ STATIC s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
return status;
}
+#ifndef PREBOOT_SUPPORT
/**
* ixgbe_setup_sgmii - Set up link for sgmii
* @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
*/
STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait)
@@ -1831,9 +1738,12 @@ STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
}
+#endif /* PREBOOT_SUPPORT */
/**
- * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
+ * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
* @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
*/
STATIC s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait)
@@ -1953,7 +1863,11 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
case ixgbe_media_type_backplane:
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
+#ifdef PREBOOT_SUPPORT
+ mac->ops.setup_link = ixgbe_setup_sgmii_fw;
+#else
mac->ops.setup_link = ixgbe_setup_sgmii;
+#endif /* PREBOOT_SUPPORT */
break;
default:
break;
@@ -2003,8 +1917,18 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
} else {
switch (hw->phy.type) {
case ixgbe_phy_ext_1g_t:
+#ifdef PREBOOT_SUPPORT
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+#endif /* PREBOOT_SUPPORT */
case ixgbe_phy_sgmii:
+#ifdef PREBOOT_SUPPORT
+ *speed = IXGBE_LINK_SPEED_1GB_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_10_FULL;
+#else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
+#endif /* PREBOOT_SUPPORT */
break;
case ixgbe_phy_x550em_kr:
if (hw->mac.type == ixgbe_mac_X550EM_a) {
@@ -2376,10 +2300,10 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
- phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
- phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
- hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
- hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
+ phy->ops.read_reg_mdi = NULL;
+ phy->ops.write_reg_mdi = NULL;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
@@ -2400,6 +2324,9 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ phy->ops.read_reg_mdi = NULL;
+		phy->ops.write_reg_mdi = NULL;
+		/* fall through */
	default:
break;
}
@@ -2536,7 +2463,8 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
status);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ status == IXGBE_ERR_PHY_ADDR_INVALID) {
DEBUGOUT("Returning from reset HW due to PHY init failure\n");
return status;
}
@@ -2697,6 +2625,8 @@ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
/**
* ixgbe_setup_mac_link_sfp_x550em - Setup internal/external the PHY for SFP
* @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: unused
*
* Configure the external PHY and the integrated KR PHY for SFP support.
**/
@@ -2789,8 +2719,10 @@ STATIC s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
/**
* ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
* @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: unused
*
- * Configure the the integrated PHY for SFP support.
+ * Configure the integrated PHY for SFP support.
**/
s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -3211,6 +3143,8 @@ s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
/* one word */
buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
+ buffer.pad2 = 0;
+ buffer.pad3 = 0;
status = hw->mac.ops.acquire_swfw_sync(hw, mask);
if (status)
@@ -3269,6 +3203,8 @@ s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
/* convert offset from words to bytes */
buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
+ buffer.pad2 = 0;
+ buffer.pad3 = 0;
status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT);
@@ -3406,6 +3342,8 @@ out:
* @ptr: pointer offset in eeprom
* @size: size of section pointed by ptr, if 0 first word will be used as size
* @csum: address of checksum to update
+ * @buffer: pointer to buffer containing calculated checksum
+ * @buffer_size: size of buffer
*
* Returns error status for any failure
*/
@@ -3740,7 +3678,13 @@ u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
break;
case ixgbe_phy_sgmii:
+#ifdef PREBOOT_SUPPORT
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
+ IXGBE_PHYSICAL_LAYER_100BASE_TX |
+ IXGBE_PHYSICAL_LAYER_10BASE_T;
+#else
physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+#endif /* PREBOOT_SUPPORT */
break;
case ixgbe_phy_ext_1g_t:
physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
@@ -3777,6 +3721,7 @@ s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
/**
* ixgbe_disable_rx_x550 - Disable RX unit
+ * @hw: pointer to hardware structure
*
 * Disables the Rx DMA unit for x550
**/
@@ -4421,6 +4366,7 @@ STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
* ixgbe_read_phy_reg_x550a - Reads specified PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
*
* Reads a value from a specified PHY register using the SWFW lock and PHY
diff --git a/drivers/net/ixgbe/base/meson.build b/drivers/net/ixgbe/base/meson.build
new file mode 100644
index 00000000..3147e110
--- /dev/null
+++ b/drivers/net/ixgbe/base/meson.build
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = [
+ 'ixgbe_82598.c',
+ 'ixgbe_82599.c',
+ 'ixgbe_api.c',
+ 'ixgbe_common.c',
+ 'ixgbe_dcb_82598.c',
+ 'ixgbe_dcb_82599.c',
+ 'ixgbe_dcb.c',
+ 'ixgbe_hv_vf.c',
+ 'ixgbe_mbx.c',
+ 'ixgbe_phy.c',
+ 'ixgbe_vf.c',
+ 'ixgbe_x540.c',
+ 'ixgbe_x550.c'
+]
+
+error_cflags = ['-Wno-unused-value',
+ '-Wno-unused-but-set-variable']
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('ixgbe_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/net/ixgbe/ixgbe_82599_bypass.c b/drivers/net/ixgbe/ixgbe_82599_bypass.c
index de9fa5a7..b16ebd0a 100644
--- a/drivers/net/ixgbe/ixgbe_82599_bypass.c
+++ b/drivers/net/ixgbe/ixgbe_82599_bypass.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include "base/ixgbe_type.h"
diff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c
index 38a44936..ae38ce35 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.c
+++ b/drivers/net/ixgbe/ixgbe_bypass.c
@@ -1,39 +1,10 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <time.h>
#include <rte_atomic.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass_api.h"
#include "rte_pmd_ixgbe.h"
diff --git a/drivers/net/ixgbe/ixgbe_bypass.h b/drivers/net/ixgbe/ixgbe_bypass.h
index 09155bb3..92befad5 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.h
+++ b/drivers/net/ixgbe/ixgbe_bypass.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _IXGBE_BYPASS_H_
diff --git a/drivers/net/ixgbe/ixgbe_bypass_api.h b/drivers/net/ixgbe/ixgbe_bypass_api.h
index d52fde04..8eb77339 100644
--- a/drivers/net/ixgbe/ixgbe_bypass_api.h
+++ b/drivers/net/ixgbe/ixgbe_bypass_api.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _IXGBE_BYPASS_API_H_
diff --git a/drivers/net/ixgbe/ixgbe_bypass_defines.h b/drivers/net/ixgbe/ixgbe_bypass_defines.h
index d12c2714..7740546b 100644
--- a/drivers/net/ixgbe/ixgbe_bypass_defines.h
+++ b/drivers/net/ixgbe/ixgbe_bypass_defines.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _IXGBE_BYPASS_DEFINES_H_
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ff19a564..44832585 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <sys/queue.h>
@@ -55,7 +26,7 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
@@ -95,6 +66,9 @@
/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
+/* Default value of the maximum number of Rx queues */
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+
#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */
@@ -427,6 +401,9 @@ static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
+int ixgbe_logtype_init;
+int ixgbe_logtype_driver;
+
/*
* The set of PCI devices this driver supports
*/
@@ -1170,13 +1147,6 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
-#ifdef RTE_LIBRTE_SECURITY
- /* Initialize security_ctx only for primary process*/
- eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
- if (eth_dev->security_ctx == NULL)
- return -ENOMEM;
-#endif
-
rte_eth_copy_pci_info(eth_dev, pci_dev);
/* Vendor and Device ID need to be set before init of shared code */
@@ -1203,6 +1173,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* Unlock any pending hardware semaphore */
ixgbe_swfw_lock_reset(hw);
+#ifdef RTE_LIBRTE_SECURITY
+ /* Initialize security_ctx only for the primary process */
+ if (ixgbe_ipsec_ctx_create(eth_dev))
+ return -ENOMEM;
+#endif
+
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
ixgbe_dcb_init(hw, dcb_config);
@@ -2194,9 +2170,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
return -EINVAL;
}
- RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
- RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
-
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+ IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+ pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
return 0;
}
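
The hunk above stops sizing the pools from the requested per-rule queue count and instead derives the split from the fixed 128-queue budget divided by the number of active pools. A small illustration of the resulting arithmetic (the helper and the example values are for illustration only, not part of this patch):

#include <stdint.h>

#define IXGBE_MAX_RX_QUEUE_NUM 128

/* With 32 active pools, each pool owns 128 / 32 = 4 queues; with
 * max_vfs = 31 the PF's default pool then starts at 31 * 4 = 124. */
static uint16_t
def_pool_q_idx(uint16_t active_pools, uint16_t max_vfs)
{
	uint16_t nb_q_per_pool = IXGBE_MAX_RX_QUEUE_NUM / active_pools;

	return max_vfs * nb_q_per_pool;
}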
@@ -2236,8 +2213,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
case ETH_MQ_RX_NONE:
/* if nothing mq mode configure, use default scheme */
dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
- if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
- RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
break;
default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
/* SRIOV only works in VMDq enable mode */
@@ -3676,7 +3651,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP;
/*
* RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
@@ -3714,8 +3690,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
#ifdef RTE_LIBRTE_SECURITY
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+ if (dev->security_ctx) {
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+ }
#endif
dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -3821,7 +3799,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -4383,12 +4362,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
ixgbe_dev_link_status_print(dev);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
if (intr->flags & IXGBE_FLAG_MACSEC) {
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
- NULL, NULL);
+ NULL);
intr->flags &= ~IXGBE_FLAG_MACSEC;
}
@@ -5025,7 +5004,11 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- hw->mac.ops.reset_hw(hw);
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
+ return err;
+ }
hw->mac.get_link_status = true;
/* negotiate mailbox API version to use with the PF. */
@@ -5057,7 +5040,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
ixgbevf_dev_rxtx_start(dev);
/* check and configure queue intr-vector mapping */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
/* According to datasheet, only vector 0/1/2 can be used,
* now only one vector is used for Rx queue
*/
@@ -8165,13 +8149,17 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
u32 in_msg = 0;
- if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
- return;
+ /* peek the message first */
+ in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
/* PF reset VF event */
- if (in_msg == IXGBE_PF_CONTROL_MSG)
+ if (in_msg == IXGBE_PF_CONTROL_MSG) {
+ /* dummy mailbox read to ack the PF */
+ if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+ return;
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
- NULL, NULL);
+ NULL);
+ }
}
static int
@@ -8339,6 +8327,18 @@ ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
}
}
+/* restore rss filter */
+static inline void
+ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->rss_info.num)
+ ixgbe_config_rss_filter(dev,
+ &filter_info->rss_info, TRUE);
+}
+
static int
ixgbe_filter_restore(struct rte_eth_dev *dev)
{
@@ -8347,6 +8347,7 @@ ixgbe_filter_restore(struct rte_eth_dev *dev)
ixgbe_syn_filter_restore(dev);
ixgbe_fdir_filter_restore(dev);
ixgbe_l2_tn_filter_restore(dev);
+ ixgbe_rss_filter_restore(dev);
return 0;
}
@@ -8444,3 +8445,15 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
+
+RTE_INIT(ixgbe_init_log);
+static void
+ixgbe_init_log(void)
+{
+ ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
+ if (ixgbe_logtype_init >= 0)
+ rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
+ ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
+ if (ixgbe_logtype_driver >= 0)
+ rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
+}
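
The final hunk registers the two dynamic log types from a constructor. The same idiom, reduced to a standalone sketch for a hypothetical PMD (the "mypmd" names are placeholders, not part of this patch):

#include <rte_log.h>

int mypmd_logtype;

RTE_INIT(mypmd_init_log);
static void
mypmd_init_log(void)
{
	/* register a named log type and default it to NOTICE */
	mypmd_logtype = rte_log_register("pmd.net.mypmd");
	if (mypmd_logtype >= 0)
		rte_log_set_level(mypmd_logtype, RTE_LOG_NOTICE);
}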
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 51ddcfd4..c56d6524 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#ifndef _IXGBE_ETHDEV_H_
@@ -224,6 +195,12 @@ struct ixgbe_hw_fdir_info {
bool mask_added; /* If already got mask from consistent filter */
};
+struct ixgbe_rte_flow_rss_conf {
+ struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+ uint16_t num; /**< Number of entries in queue[]. */
+ uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
+};
+
/* structure for interrupt relative data */
struct ixgbe_interrupt {
uint32_t flags;
@@ -340,6 +317,8 @@ struct ixgbe_filter_info {
struct ixgbe_5tuple_filter_list fivetuple_list;
/* store the SYN filter info */
uint32_t syn_info;
+ /* store the rss filter info */
+ struct ixgbe_rte_flow_rss_conf rss_info;
};
struct ixgbe_l2_tn_key {
@@ -719,6 +698,8 @@ void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t tx_rate);
+int ixgbe_config_rss_filter(struct rte_eth_dev *dev,
+ struct ixgbe_rte_flow_rss_conf *conf, bool add);
static inline int
ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
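
The new ixgbe_rte_flow_rss_conf is the driver's internal snapshot of one rte_flow RSS rule: the hash configuration plus an explicit queue list. A hypothetical helper showing how such a snapshot would be populated (illustration only; assumes ixgbe_ethdev.h and rte_ethdev.h are included):

#include <string.h>

/* Spread a rule over the first n Rx queues with IP/TCP/UDP hashing. */
static void
fill_rss_conf(struct ixgbe_rte_flow_rss_conf *conf, uint16_t n)
{
	uint16_t i;

	memset(conf, 0, sizeof(*conf));
	conf->rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP;
	conf->num = n;
	for (i = 0; i < n; i++)
		conf->queue[i] = i;
}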
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 9281dc1a..d5e51797 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <stdio.h>
@@ -42,7 +13,7 @@
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include "ixgbe_logs.h"
@@ -70,14 +41,14 @@
#define IXGBE_FDIRCMD_CMD_INTERVAL_US 10
#define IXGBE_FDIR_FLOW_TYPES ( \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
- (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+ (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
uint8_t ipv6_addr[16]; \
@@ -1277,7 +1248,8 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
IXGBE_ATR_FLOW_TYPE_IPV6) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0) &&
- rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) {
PMD_DRV_LOG(ERR, "By this device,"
" IPv4 is not supported without"
" L4 protocol and ports masked!");
@@ -1435,7 +1407,7 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
- uint32_t fdirctrl, max_num;
+ uint32_t fdirctrl, max_num, i;
uint8_t offset;
fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
@@ -1467,9 +1439,11 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
if (fdir_info->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN ||
fdir_info->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
- fdir_info->flow_types_mask[0] = 0;
+ fdir_info->flow_types_mask[0] = 0ULL;
else
fdir_info->flow_types_mask[0] = IXGBE_FDIR_FLOW_TYPES;
+ for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
+ fdir_info->flow_types_mask[i] = 0ULL;
fdir_info->flex_payload_unit = sizeof(uint16_t);
fdir_info->max_flex_payload_segment_num = 1;
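
The mask hunks widen the shifted constants to unsigned long long: flow_types_mask[] holds 64-bit entries, and with a plain int constant the shift is evaluated in 32-bit arithmetic, which becomes undefined as soon as a flow-type value reaches 31. The pattern in isolation:

#include <stdint.h>
#include <rte_eth_ctrl.h>

/* Evaluated in 64-bit arithmetic thanks to the ULL suffix; a plain
 * "1 <<" would be undefined for any flow type value of 31 or more. */
static const uint64_t udp_flows =
	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) |
	(1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP);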
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 19c2d479..dcbfb38b 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <sys/queue.h>
@@ -54,7 +25,7 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
@@ -103,6 +74,11 @@ struct ixgbe_eth_l2_tunnel_conf_ele {
TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
struct rte_eth_l2_tunnel_conf filter_info;
};
+/* rss filter list structure */
+struct ixgbe_rss_conf_ele {
+ TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
+ struct ixgbe_rte_flow_rss_conf filter_info;
+};
/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
TAILQ_ENTRY(ixgbe_flow_mem) entries;
@@ -114,6 +90,7 @@ TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
static struct ixgbe_ntuple_filter_list filter_ntuple_list;
@@ -121,6 +98,7 @@ static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct ixgbe_rss_filter_list filter_rss_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;
/**
@@ -207,6 +185,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_udp *udp_mask;
const struct rte_flow_item_sctp *sctp_spec;
const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ struct rte_flow_item_eth eth_null;
+ struct rte_flow_item_vlan vlan_null;
if (!pattern) {
rte_flow_error_set(error,
@@ -228,6 +212,9 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+ memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
#ifdef RTE_LIBRTE_SECURITY
/**
* Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
@@ -277,6 +264,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
/* Skip Ethernet */
if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error,
@@ -287,15 +276,20 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
/* if the first item is MAC, the content should be NULL */
- if (item->spec || item->mask) {
+ if ((item->spec || item->mask) &&
+ (memcmp(eth_spec, &eth_null,
+ sizeof(struct rte_flow_item_eth)) ||
+ memcmp(eth_mask, &eth_null,
+ sizeof(struct rte_flow_item_eth)))) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
return -rte_errno;
}
- /* check if the next not void item is IPv4 */
+ /* check if the next not void item is IPv4 or Vlan */
item = next_no_void_pattern(pattern, item);
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
rte_flow_error_set(error,
EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -303,48 +297,82 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
}
- /* get the IPv4 info */
- if (!item->spec || !item->mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Invalid ntuple mask");
- return -rte_errno;
- }
- /*Not supported last point for range*/
- if (item->last) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- item, "Not supported last point for range");
- return -rte_errno;
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* the content should be NULL */
+ if ((item->spec || item->mask) &&
+ (memcmp(vlan_spec, &vlan_null,
+ sizeof(struct rte_flow_item_vlan)) ||
+ memcmp(vlan_mask, &vlan_null,
+ sizeof(struct rte_flow_item_vlan)))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
}
- ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
- /**
- * Only support src & dst addresses, protocol,
- * others should be masked.
- */
- if (ipv4_mask->hdr.version_ihl ||
- ipv4_mask->hdr.type_of_service ||
- ipv4_mask->hdr.total_length ||
- ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.fragment_offset ||
- ipv4_mask->hdr.time_to_live ||
- ipv4_mask->hdr.hdr_checksum) {
+ if (item->mask) {
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
rte_flow_error_set(error,
- EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by ntuple filter");
- return -rte_errno;
- }
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
- filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
- filter->src_ip_mask = ipv4_mask->hdr.src_addr;
- filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
- ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
- filter->dst_ip = ipv4_spec->hdr.dst_addr;
- filter->src_ip = ipv4_spec->hdr.src_addr;
- filter->proto = ipv4_spec->hdr.next_proto_id;
+ ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+ }
/* check if the next not void item is TCP or UDP */
item = next_no_void_pattern(pattern, item);
@@ -359,8 +387,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* get the TCP/UDP info */
if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+ (!item->spec && !item->mask)) {
+ goto action;
+ }
+
+ /* get the TCP/UDP/SCTP info */
+ if (item->type != RTE_FLOW_ITEM_TYPE_END &&
(!item->spec || !item->mask)) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -2466,8 +2499,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
item, "Not supported by fdir filter");
return -rte_errno;
}
- if (nvgre_mask->c_k_s_rsvd0_ver !=
- rte_cpu_to_be_16(0x3000) ||
+ if (nvgre_mask->protocol &&
nvgre_mask->protocol != 0xFFFF) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2475,6 +2507,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
item, "Not supported by fdir filter");
return -rte_errno;
}
+ if (nvgre_mask->c_k_s_rsvd0_ver &&
+ nvgre_mask->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0xFFFF)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
/* TNI must be totally masked or not. */
if (nvgre_mask->tni[0] &&
((nvgre_mask->tni[0] != 0xFF) ||
@@ -2496,7 +2537,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
nvgre_spec =
(const struct rte_flow_item_nvgre *)item->spec;
if (nvgre_spec->c_k_s_rsvd0_ver !=
- rte_cpu_to_be_16(0x2000) ||
+ rte_cpu_to_be_16(0x2000) &&
+ nvgre_mask->c_k_s_rsvd0_ver) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ if (nvgre_mask->protocol &&
nvgre_spec->protocol !=
rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2700,6 +2749,109 @@ step_next:
return ret;
}
+static int
+ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct ixgbe_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ uint16_t n;
+
+ /**
+ * RSS only supports forwarding,
+ * check if the first not void action is RSS.
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ if (!rss || !rss->num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+ if (rss->rss_conf)
+ rss_conf->rss_conf = *rss->rss_conf;
+ else
+ rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
+
+ for (n = 0; n < rss->num; ++n)
+ rss_conf->queue[n] = rss->queue[n];
+ rss_conf->num = rss->num;
+
+ /* check if the next not void action is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* remove the rss filter */
+static void
+ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->rss_info.num)
+ ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
void
ixgbe_filterlist_init(void)
{
@@ -2708,6 +2860,7 @@ ixgbe_filterlist_init(void)
TAILQ_INIT(&filter_syn_list);
TAILQ_INIT(&filter_fdir_list);
TAILQ_INIT(&filter_l2_tunnel_list);
+ TAILQ_INIT(&filter_rss_list);
TAILQ_INIT(&ixgbe_flow_list);
}
@@ -2720,6 +2873,7 @@ ixgbe_filterlist_flush(void)
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+ struct ixgbe_rss_conf_ele *rss_filter_ptr;
while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
TAILQ_REMOVE(&filter_ntuple_list,
@@ -2756,6 +2910,13 @@ ixgbe_filterlist_flush(void)
rte_free(fdir_rule_ptr);
}
+ while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr,
+ entries);
+ rte_free(rss_filter_ptr);
+ }
+
while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
TAILQ_REMOVE(&ixgbe_flow_list,
ixgbe_flow_mem_ptr,
@@ -2786,12 +2947,14 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf l2_tn_filter;
struct ixgbe_hw_fdir_info *fdir_info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_rte_flow_rss_conf rss_conf;
struct rte_flow *flow = NULL;
struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_rss_conf_ele *rss_filter_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
uint8_t first_mask = FALSE;
@@ -2992,6 +3155,29 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
}
}
+ memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ ret = ixgbe_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
+ if (!ret) {
+ ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
+ if (!ret) {
+ rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
+ sizeof(struct ixgbe_rss_conf_ele), 0);
+ if (!rss_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&rss_filter_ptr->filter_info,
+ &rss_conf,
+ sizeof(struct ixgbe_rte_flow_rss_conf));
+ TAILQ_INSERT_TAIL(&filter_rss_list,
+ rss_filter_ptr, entries);
+ flow->rule = rss_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_HASH;
+ return flow;
+ }
+ }
+
out:
TAILQ_REMOVE(&ixgbe_flow_list,
ixgbe_flow_mem_ptr, entries);
@@ -3020,6 +3206,7 @@ ixgbe_flow_validate(struct rte_eth_dev *dev,
struct rte_eth_syn_filter syn_filter;
struct rte_eth_l2_tunnel_conf l2_tn_filter;
struct ixgbe_fdir_rule fdir_rule;
+ struct ixgbe_rte_flow_rss_conf rss_conf;
int ret;
memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -3049,6 +3236,12 @@ ixgbe_flow_validate(struct rte_eth_dev *dev,
memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
actions, &l2_tn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ ret = ixgbe_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
return ret;
}
@@ -3075,6 +3268,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
struct ixgbe_hw_fdir_info *fdir_info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_rss_conf_ele *rss_filter_ptr;
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
@@ -3143,6 +3337,17 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
rte_free(l2_tn_filter_ptr);
}
break;
+ case RTE_ETH_FILTER_HASH:
+ rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
+ pmd_flow->rule;
+ ret = ixgbe_config_rss_filter(dev,
+ &rss_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr, entries);
+ rte_free(rss_filter_ptr);
+ }
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -3194,6 +3399,8 @@ ixgbe_flow_flush(struct rte_eth_dev *dev,
return ret;
}
+ ixgbe_clear_rss_filter(dev);
+
ixgbe_filterlist_flush();
return 0;
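
With the parser and list handling above in place, an application can install an RSS rule through the generic flow API. A hedged usage sketch against the 18.02 flow API (the empty pattern, queue choice, and error handling are illustrative only):

#include <stdlib.h>
#include <rte_flow.h>

static struct rte_flow *
create_rss_flow(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[2];
	struct rte_flow_action_rss *rss;
	struct rte_flow_error err;
	struct rte_flow *flow;

	/* rte_flow_action_rss ends in a flexible queue[] array */
	rss = calloc(1, sizeof(*rss) + 2 * sizeof(uint16_t));
	if (rss == NULL)
		return NULL;
	rss->num = 2;
	rss->queue[0] = 0;
	rss->queue[1] = 1;
	/* rss_conf left NULL: the parser above then falls back to
	 * IXGBE_RSS_OFFLOAD_ALL */

	actions[0].type = RTE_FLOW_ACTION_TYPE_RSS;
	actions[0].conf = rss;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	actions[1].conf = NULL;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	free(rss);	/* the driver copies the conf it needs */
	return flow;
}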
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 105da11a..176ec0fd 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -1,37 +1,8 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_ip.h>
#include <rte_jhash.h>
@@ -70,6 +41,8 @@ static void
ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
+ dev->data->dev_private);
int i = 0;
/* clear Rx IP table*/
@@ -106,6 +79,10 @@ ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
IXGBE_WAIT_TWRITE;
}
+
+ memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl));
+ memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));
+ memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));
}
static int
@@ -173,16 +150,6 @@ ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
priv->rx_sa_tbl[sa_index].spi =
rte_cpu_to_be_32(ic_session->spi);
priv->rx_sa_tbl[sa_index].ip_index = ip_index;
- priv->rx_sa_tbl[sa_index].key[3] =
- rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
- priv->rx_sa_tbl[sa_index].key[2] =
- rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
- priv->rx_sa_tbl[sa_index].key[1] =
- rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
- priv->rx_sa_tbl[sa_index].key[0] =
- rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
- priv->rx_sa_tbl[sa_index].salt =
- rte_cpu_to_be_32(ic_session->salt);
priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION)
priv->rx_sa_tbl[sa_index].mode |=
@@ -225,15 +192,15 @@ ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
IPSRXIDX_TABLE_KEY | (sa_index << 3);
IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
- priv->rx_sa_tbl[sa_index].key[0]);
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]));
IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
- priv->rx_sa_tbl[sa_index].key[1]);
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]));
IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
- priv->rx_sa_tbl[sa_index].key[2]);
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]));
IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
- priv->rx_sa_tbl[sa_index].key[3]);
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]));
IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
- priv->rx_sa_tbl[sa_index].salt);
+ rte_cpu_to_be_32(ic_session->salt));
IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
priv->rx_sa_tbl[sa_index].mode);
IXGBE_WAIT_RWRITE;
@@ -257,32 +224,22 @@ ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
priv->tx_sa_tbl[sa_index].spi =
rte_cpu_to_be_32(ic_session->spi);
- priv->tx_sa_tbl[sa_index].key[3] =
- rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
- priv->tx_sa_tbl[sa_index].key[2] =
- rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
- priv->tx_sa_tbl[sa_index].key[1] =
- rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
- priv->tx_sa_tbl[sa_index].key[0] =
- rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
- priv->tx_sa_tbl[sa_index].salt =
- rte_cpu_to_be_32(ic_session->salt);
+ priv->tx_sa_tbl[i].used = 1;
+ ic_session->sa_index = sa_index;
+ /* write key table entry */
reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
- priv->tx_sa_tbl[sa_index].key[0]);
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]));
IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
- priv->tx_sa_tbl[sa_index].key[1]);
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]));
IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
- priv->tx_sa_tbl[sa_index].key[2]);
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]));
IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
- priv->tx_sa_tbl[sa_index].key[3]);
+ rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]));
IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
- priv->tx_sa_tbl[sa_index].salt);
+ rte_cpu_to_be_32(ic_session->salt));
IXGBE_WAIT_TWRITE;
-
- priv->tx_sa_tbl[i].used = 1;
- ic_session->sa_index = sa_index;
}
return 0;
@@ -445,6 +402,12 @@ ixgbe_crypto_create_session(void *device,
return 0;
}
+static unsigned int
+ixgbe_crypto_session_get_size(__rte_unused void *device)
+{
+ return sizeof(struct ixgbe_crypto_session);
+}
+
static int
ixgbe_crypto_remove_session(void *device,
struct rte_security_session *session)
@@ -717,21 +680,44 @@ ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
static struct rte_security_ops ixgbe_security_ops = {
.session_create = ixgbe_crypto_create_session,
.session_update = NULL,
+ .session_get_size = ixgbe_crypto_session_get_size,
.session_stats_get = NULL,
.session_destroy = ixgbe_crypto_remove_session,
.set_pkt_metadata = ixgbe_crypto_update_mb,
.capabilities_get = ixgbe_crypto_capabilities_get
};
-struct rte_security_ctx *
+static int
+ixgbe_crypto_capable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t reg_i, reg, capable = 1;
+ /* test if rx crypto can be enabled and then write back initial value*/
+ reg_i = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+ reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ if (reg != 0)
+ capable = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg_i);
+ return capable;
+}
+
+int
ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
{
- struct rte_security_ctx *ctx = rte_malloc("rte_security_instances_ops",
- sizeof(struct rte_security_ctx), 0);
- if (ctx) {
- ctx->device = (void *)dev;
- ctx->ops = &ixgbe_security_ops;
- ctx->sess_cnt = 0;
+ struct rte_security_ctx *ctx = NULL;
+
+ if (ixgbe_crypto_capable(dev)) {
+ ctx = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (ctx) {
+ ctx->device = (void *)dev;
+ ctx->ops = &ixgbe_security_ops;
+ ctx->sess_cnt = 0;
+ dev->security_ctx = ctx;
+ } else {
+ return -ENOMEM;
+ }
}
- return ctx;
+ return 0;
}
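
The new session_get_size op lets rte_security report how much session memory the driver needs, which an application would typically use to size the mempool backing rte_security_session_create. A sketch, assuming the 18.02 rte_security API (pool name and geometry are arbitrary):

#include <rte_mempool.h>
#include <rte_security.h>

static struct rte_mempool *
make_sess_pool(struct rte_security_ctx *ctx)
{
	unsigned int elt_size = rte_security_session_get_size(ctx);

	return rte_mempool_create("ixgbe_sec_sess", 1024, elt_size,
				  64, 0, NULL, NULL, NULL, NULL,
				  SOCKET_ID_ANY, 0);
}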
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.h b/drivers/net/ixgbe/ixgbe_ipsec.h
index fb8fefc8..c73e1806 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.h
+++ b/drivers/net/ixgbe/ixgbe_ipsec.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#ifndef IXGBE_IPSEC_H_
@@ -107,16 +78,12 @@ struct ixgbe_crypto_rx_ip_table {
struct ixgbe_crypto_rx_sa_table {
uint32_t spi;
uint32_t ip_index;
- uint32_t key[4];
- uint32_t salt;
uint8_t mode;
uint8_t used;
};
struct ixgbe_crypto_tx_sa_table {
uint32_t spi;
- uint32_t key[4];
- uint32_t salt;
uint8_t used;
};
@@ -139,8 +106,7 @@ struct ixgbe_ipsec {
};
-struct rte_security_ctx *
-ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
+int ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
int ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
int ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
const void *ip_spec,
diff --git a/drivers/net/ixgbe/ixgbe_logs.h b/drivers/net/ixgbe/ixgbe_logs.h
index 53ba42d9..dc73e9bd 100644
--- a/drivers/net/ixgbe/ixgbe_logs.h
+++ b/drivers/net/ixgbe/ixgbe_logs.h
@@ -1,47 +1,16 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _IXGBE_LOGS_H_
#define _IXGBE_LOGS_H_
+extern int ixgbe_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+ rte_log(RTE_LOG_ ## level, ixgbe_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
-#ifdef RTE_LIBRTE_IXGBE_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while(0)
-#endif
#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -64,12 +33,10 @@
#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
#endif
-#ifdef RTE_LIBRTE_IXGBE_DEBUG_DRIVER
+extern int ixgbe_logtype_driver;
#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, ixgbe_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
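
With the compile-time RTE_LIBRTE_IXGBE_DEBUG_INIT/DRIVER switches gone, verbosity is now a runtime decision. Since rte_log_register() returns the existing id when the name is already registered, an application can look a type up by name and raise its level (sketch only):

#include <rte_log.h>

static void
enable_ixgbe_init_debug(void)
{
	int logtype = rte_log_register("pmd.net.ixgbe.init");

	if (logtype >= 0)
		rte_log_set_level(logtype, RTE_LOG_DEBUG);
}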
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 676e92c7..ea997371 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <stdio.h>
@@ -44,7 +15,7 @@
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>
@@ -273,7 +244,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- gpie |= IXGBE_GPIE_MSIX_MODE;
+ gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;
switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
case ETH_64_POOLS:
@@ -768,7 +739,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
/* notify application about VF reset */
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
- NULL, &ret_param);
+ &ret_param);
return ret;
}
@@ -780,7 +751,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
* if ret_param.retval > 1, do nothing and send NAK to VF
*/
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
- NULL, &ret_param);
+ &ret_param);
retval = ret_param.retval;
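
_rte_eth_dev_callback_process() drops its cb_arg parameter in this release; the per-callback argument now comes from registration time and ret_param is passed through directly. On the application side, the VF mailbox hook keeps its four-argument shape; a hedged sketch (struct field usage assumed from rte_pmd_ixgbe.h, illustration only):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_pmd_ixgbe.h>

static int
vf_mbox_cb(uint16_t port_id, enum rte_eth_event_type event,
	   void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);

	if (event == RTE_ETH_EVENT_VF_MBOX && ret_param != NULL) {
		struct rte_pmd_ixgbe_mb_event_param *p = ret_param;

		/* p->retval steers the PF's reply; the accepted values
		 * are described in the hunk above */
		printf("port %u: VF %u mailbox msg type 0x%x\n",
		       (unsigned)port_id, (unsigned)p->vfid,
		       (unsigned)p->msg_type);
	}
	return 0;
}

/* registered once per port:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_VF_MBOX,
 *                               vf_mbox_cb, NULL);
 */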
diff --git a/drivers/net/ixgbe/ixgbe_regs.h b/drivers/net/ixgbe/ixgbe_regs.h
index 2aa48201..9c953370 100644
--- a/drivers/net/ixgbe/ixgbe_regs.h
+++ b/drivers/net/ixgbe/ixgbe_regs.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 Intel Corporation
*/
#ifndef _IXGBE_REGS_H_
#define _IXGBE_REGS_H_
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9bc84624..6c582b4b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1,35 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * Copyright 2014 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation.
+ * Copyright 2014 6WIND S.A.
*/
#include <sys/queue.h>
@@ -62,7 +33,7 @@
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
@@ -5550,6 +5521,71 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
}
}
+int
+ixgbe_config_rss_filter(struct rte_eth_dev *dev,
+ struct ixgbe_rte_flow_rss_conf *conf, bool add)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reta;
+ uint16_t i;
+ uint16_t j;
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
+ struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
+ if (!add) {
+ if (memcmp(conf, &filter_info->rss_info,
+ sizeof(struct ixgbe_rte_flow_rss_conf)) == 0) {
+ ixgbe_rss_disable(dev);
+ memset(&filter_info->rss_info, 0,
+ sizeof(struct ixgbe_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (filter_info->rss_info.num)
+ return -EINVAL;
+ /* Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
+ reta = 0;
+ for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
+ if (j == conf->num)
+ j = 0;
+ reta = (reta << 8) | conf->queue[j];
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(hw, reta_reg,
+ rte_bswap32(reta));
+ }
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
+ ixgbe_rss_disable(dev);
+ return -EINVAL;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ ixgbe_hw_rss_hash_set(hw, &rss_conf);
+
+ rte_memcpy(&filter_info->rss_info,
+ conf, sizeof(struct ixgbe_rte_flow_rss_conf));
+
+ return 0;
+}
+
/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
int __attribute__((weak))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
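
The RETA fill above packs four one-byte queue indices per 32-bit register, accumulating them MSB-first and byte-swapping on write because the registers are little-endian. The packing step in isolation (helper name is illustrative):

#include <stdint.h>
#include <rte_byteorder.h>

/* With q = {0, 1, 2, 3}: reta accumulates to 0x00010203 and is
 * written as 0x03020100. */
static uint32_t
pack_reta(const uint16_t q[4])
{
	uint32_t reta = 0;
	int i;

	for (i = 0; i < 4; i++)
		reta = (reta << 8) | (uint8_t)q[i];
	return rte_bswap32(reta);
}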
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index cc7c8288..69c718bc 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _IXGBE_RXTX_H_
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 9fc112b1..414840a2 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -1,40 +1,11 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef _IXGBE_RXTX_VEC_COMMON_H_
#define _IXGBE_RXTX_VEC_COMMON_H_
#include <stdint.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index 2e87ffa0..e0f9998f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -1,38 +1,9 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <stdint.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include "ixgbe_ethdev.h"
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index 486239ba..c9ba4824 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -1,38 +1,9 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <stdint.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include "ixgbe_ethdev.h"
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index ca4182c9..73845a73 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <rte_malloc.h>
diff --git a/drivers/net/ixgbe/meson.build b/drivers/net/ixgbe/meson.build
new file mode 100644
index 00000000..60af0bae
--- /dev/null
+++ b/drivers/net/ixgbe/meson.build
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+cflags += ['-DRTE_LIBRTE_IXGBE_BYPASS']
+
+subdir('base')
+objs = [base_objs]
+
+allow_experimental_apis = true
+sources = files(
+ 'ixgbe_82599_bypass.c',
+ 'ixgbe_bypass.c',
+ 'ixgbe_ethdev.c',
+ 'ixgbe_fdir.c',
+ 'ixgbe_flow.c',
+ 'ixgbe_ipsec.c',
+ 'ixgbe_pf.c',
+ 'ixgbe_rxtx.c',
+ 'ixgbe_tm.c',
+ 'rte_pmd_ixgbe.c'
+)
+
+deps += ['hash', 'security']
+
+if arch_subdir == 'x86'
+ dpdk_conf.set('RTE_IXGBE_INC_VECTOR', 1)
+ sources += files('ixgbe_rxtx_vec_sse.c')
+endif
+
+includes += include_directories('base')
+
+install_headers('rte_pmd_ixgbe.h')
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index f1273785..d8ca8ca3 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -1,37 +1,8 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
*/
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "base/ixgbe_api.h"
#include "ixgbe_ethdev.h"
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 81b18f87..11a9f334 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
*/
/**
@@ -39,7 +11,7 @@
#ifndef _PMD_IXGBE_H_
#define _PMD_IXGBE_H_
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
/**
* Notify VF when PF link status changes.
diff --git a/drivers/net/kni/Makefile b/drivers/net/kni/Makefile
index a3f51f92..01eaef05 100644
--- a/drivers/net/kni/Makefile
+++ b/drivers/net/kni/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index 8f269532..dc4e65f5 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -1,41 +1,12 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <fcntl.h>
#include <pthread.h>
#include <unistd.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kni.h>
#include <rte_kvargs.h>
@@ -90,7 +61,7 @@ static const struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_SPEED_AUTONEG,
+ .link_autoneg = ETH_LINK_AUTONEG,
};
static int is_kni_initialized;
diff --git a/drivers/net/liquidio/Makefile b/drivers/net/liquidio/Makefile
index 5110099f..fc5f18ad 100644
--- a/drivers/net/liquidio/Makefile
+++ b/drivers/net/liquidio/Makefile
@@ -1,34 +1,5 @@
-#
-# BSD LICENSE
-#
-# Copyright(c) 2017 Cavium, Inc.. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Cavium, Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
#
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/liquidio/base/lio_23xx_reg.h b/drivers/net/liquidio/base/lio_23xx_reg.h
index 794bc2ca..9f28504b 100644
--- a/drivers/net/liquidio/base/lio_23xx_reg.h
+++ b/drivers/net/liquidio/base/lio_23xx_reg.h
@@ -1,34 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef _LIO_23XX_REG_H_
diff --git a/drivers/net/liquidio/base/lio_23xx_vf.c b/drivers/net/liquidio/base/lio_23xx_vf.c
index e30c20dc..ddbc8c0e 100644
--- a/drivers/net/liquidio/base/lio_23xx_vf.c
+++ b/drivers/net/liquidio/base/lio_23xx_vf.c
@@ -1,39 +1,10 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#include <string.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
@@ -150,6 +121,8 @@ cn23xx_vf_setup_global_output_regs(struct lio_device *lio_dev)
reg_val &= 0xEFFFFFFFFFFFFFFFL;
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no), reg_val);
+
reg_val =
lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
@@ -211,7 +184,7 @@ cn23xx_vf_setup_iq_regs(struct lio_device *lio_dev, uint32_t iq_no)
/* Write the start of the input queue's ring and its size */
lio_write_csr64(lio_dev, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
iq->base_addr_dma);
- lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
+ lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->nb_desc);
/* Remember the doorbell & instruction count register addr
* for this queue
@@ -243,7 +216,7 @@ cn23xx_vf_setup_oq_regs(struct lio_device *lio_dev, uint32_t oq_no)
lio_write_csr64(lio_dev, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
droq->desc_ring_dma);
- lio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->nb_desc);
lio_write_csr(lio_dev, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
(droq->buffer_size | (OCTEON_RH_SIZE << 16)));
@@ -538,51 +511,3 @@ cn23xx_vf_setup_device(struct lio_device *lio_dev)
return 0;
}
-int
-cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev)
-{
- uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
- uint64_t q_no;
-
- /* Disable the i/p and o/p queues for this Octeon.
- * IOQs will already be in reset.
- * If RST bit is set, wait for Quiet bit to be set
- * Once Quiet bit is set, clear the RST bit
- */
- PMD_INIT_FUNC_TRACE();
-
- for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
- volatile uint64_t reg_val;
-
- reg_val = lio_read_csr64(lio_dev,
- CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
- while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) && !(reg_val &
- CN23XX_PKT_INPUT_CTL_QUIET) && loop) {
- reg_val = lio_read_csr64(
- lio_dev,
- CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
- loop = loop - 1;
- }
-
- if (loop == 0) {
- lio_dev_err(lio_dev,
- "clearing the reset reg failed or setting the quiet reg failed for qno %lu\n",
- (unsigned long)q_no);
- return -1;
- }
-
- reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
- lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
- reg_val);
-
- reg_val = lio_read_csr64(lio_dev,
- CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
- if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
- lio_dev_err(lio_dev, "unable to reset qno %lu\n",
- (unsigned long)q_no);
- return -1;
- }
- }
-
- return 0;
-}
diff --git a/drivers/net/liquidio/base/lio_23xx_vf.h b/drivers/net/liquidio/base/lio_23xx_vf.h
index ad8db0df..8e5362db 100644
--- a/drivers/net/liquidio/base/lio_23xx_vf.h
+++ b/drivers/net/liquidio/base/lio_23xx_vf.h
@@ -1,34 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef _LIO_23XX_VF_H_
@@ -80,11 +51,6 @@ lio_get_conf(struct lio_device *lio_dev)
return default_lio_conf;
}
-/** Turns off the input and output queues for the device
- * @param lio_dev which device io queues to disable
- */
-int cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev);
-
#define CN23XX_VF_BUSY_READING_REG_LOOP_COUNT 100000
void cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev);
diff --git a/drivers/net/liquidio/base/lio_hw_defs.h b/drivers/net/liquidio/base/lio_hw_defs.h
index fe5c3bbb..5e119c12 100644
--- a/drivers/net/liquidio/base/lio_hw_defs.h
+++ b/drivers/net/liquidio/base/lio_hw_defs.h
@@ -1,34 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef _LIO_HW_DEFS_H_
@@ -113,6 +84,7 @@ enum lio_card_type {
#define LIO_FW_VERSION_LENGTH 32
+#define LIO_Q_RECONF_MIN_VERSION "1.7.0"
#define LIO_VF_TRUST_MIN_VERSION "1.7.1"
/** Tag types used by Octeon cores in its work. */
@@ -156,6 +128,7 @@ enum octeon_tag_type {
#define LIO_CMD_ADD_VLAN_FILTER 0x17
#define LIO_CMD_DEL_VLAN_FILTER 0x18
#define LIO_CMD_VXLAN_PORT_CONFIG 0x19
+#define LIO_CMD_QUEUE_COUNT_CTL 0x1f
#define LIO_CMD_VXLAN_PORT_ADD 0x0
#define LIO_CMD_VXLAN_PORT_DEL 0x1
diff --git a/drivers/net/liquidio/base/lio_mbox.c b/drivers/net/liquidio/base/lio_mbox.c
index b4abc623..11290015 100644
--- a/drivers/net/liquidio/base/lio_mbox.c
+++ b/drivers/net/liquidio/base/lio_mbox.c
@@ -1,37 +1,8 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_cycles.h>
#include "lio_logs.h"
diff --git a/drivers/net/liquidio/base/lio_mbox.h b/drivers/net/liquidio/base/lio_mbox.h
index b0875d64..457917e9 100644
--- a/drivers/net/liquidio/base/lio_mbox.h
+++ b/drivers/net/liquidio/base/lio_mbox.h
@@ -1,34 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef _LIO_MBOX_H_
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index 84b8a328..e1a20cd6 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -1,37 +1,8 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
@@ -43,6 +14,9 @@
#include "lio_ethdev.h"
#include "lio_rxtx.h"
+int lio_logtype_init;
+int lio_logtype_driver;
+
/* Default RSS key in use */
static uint8_t lio_rss_key[40] = {
0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
@@ -1228,12 +1202,10 @@ lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
- if ((lio_dev->droq[fw_mapped_oq]) &&
- (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
- lio_dev_err(lio_dev,
- "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
- lio_dev->droq[fw_mapped_oq]->max_count);
- return -ENOTSUP;
+ /* Free previous allocation if any */
+ if (eth_dev->data->rx_queues[q_no] != NULL) {
+ lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
+ eth_dev->data->rx_queues[q_no] = NULL;
}
mbp_priv = rte_mempool_get_priv(mp);
@@ -1267,10 +1239,6 @@ lio_dev_rx_queue_release(void *rxq)
int oq_no;
if (droq) {
- /* Run time queue deletion not supported */
- if (droq->lio_dev->port_configured)
- return;
-
oq_no = droq->q_no;
lio_delete_droq_queue(droq->lio_dev, oq_no);
}
@@ -1314,12 +1282,10 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
- if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
- (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
- lio_dev_err(lio_dev,
- "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
- lio_dev->instr_queue[fw_mapped_iq]->max_count);
- return -ENOTSUP;
+ /* Free previous allocation if any */
+ if (eth_dev->data->tx_queues[q_no] != NULL) {
+ lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
+ eth_dev->data->tx_queues[q_no] = NULL;
}
retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
@@ -1331,7 +1297,7 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
}
retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
- lio_dev->instr_queue[fw_mapped_iq]->max_count,
+ lio_dev->instr_queue[fw_mapped_iq]->nb_desc,
socket_id);
if (retval) {
@@ -1362,10 +1328,6 @@ lio_dev_tx_queue_release(void *txq)
if (tq) {
- /* Run time queue deletion not supported */
- if (tq->lio_dev->port_configured)
- return;
-
/* Free sg_list */
lio_delete_sglist(tq);
@@ -1534,6 +1496,8 @@ lio_dev_stop(struct rte_eth_dev *eth_dev)
lio_send_rx_ctrl_cmd(eth_dev, 0);
+ lio_wait_for_instr_fetch(lio_dev);
+
/* Clear recorded link status */
lio_dev->linfo.link.link_status64 = 0;
}
@@ -1607,34 +1571,14 @@ static void
lio_dev_close(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
- uint32_t i;
lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
if (lio_dev->intf_open)
lio_dev_stop(eth_dev);
- lio_wait_for_instr_fetch(lio_dev);
-
- lio_dev->fn_list.disable_io_queues(lio_dev);
-
- cn23xx_vf_set_io_queues_off(lio_dev);
-
- /* Reset iq regs (IQ_DBELL).
- * Clear sli_pktx_cnts (OQ_PKTS_SENT).
- */
- for (i = 0; i < lio_dev->nb_rx_queues; i++) {
- struct lio_droq *droq = lio_dev->droq[i];
-
- if (droq == NULL)
- break;
-
- uint32_t pkt_count = rte_read32(droq->pkts_sent_reg);
-
- lio_dev_dbg(lio_dev,
- "pending oq count %u\n", pkt_count);
- rte_write32(pkt_count, droq->pkts_sent_reg);
- }
+ /* Reset ioq regs */
+ lio_dev->fn_list.setup_device_regs(lio_dev);
if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) {
cn23xx_vf_ask_pf_to_do_flr(lio_dev);
@@ -1724,7 +1668,76 @@ lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
}
-static int lio_dev_configure(struct rte_eth_dev *eth_dev)
+static int
+lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,
+ int num_rxq)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {
+		lio_dev_err(lio_dev, "Requires firmware version >= %s\n",
+ LIO_Q_RECONF_MIN_VERSION);
+ return -ENOTSUP;
+ }
+
+	/* Flush the IQ first to prevent the command from failing
+	 * in case the queue is full.
+	 */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;
+ ctrl_pkt.ncmd.s.param1 = num_txq;
+ ctrl_pkt.ncmd.s.param2 = num_rxq;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send queue count control command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Queue count control command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (lio_dev->nb_rx_queues != num_rxq ||
+ lio_dev->nb_tx_queues != num_txq) {
+ if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))
+ return -1;
+ lio_dev->nb_rx_queues = num_rxq;
+ lio_dev->nb_tx_queues = num_txq;
+ }
+
+ if (lio_dev->intf_open)
+ lio_dev_stop(eth_dev);
+
+ /* Reset ioq registers */
+ if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
+ lio_dev_err(lio_dev, "Failed to configure device registers\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_configure(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
@@ -1737,22 +1750,21 @@ static int lio_dev_configure(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
- /* Re-configuring firmware not supported.
- * Can't change tx/rx queues per port from initial value.
+	/* Inform the firmware of the change in the number of queues to use.
+	 * Disable the IO queues and reset the registers for reconfiguration.
*/
- if (lio_dev->port_configured) {
- if ((lio_dev->nb_rx_queues != eth_dev->data->nb_rx_queues) ||
- (lio_dev->nb_tx_queues != eth_dev->data->nb_tx_queues)) {
- lio_dev_err(lio_dev,
- "rxq/txq re-conf not supported. Restart application with new value.\n");
- return -ENOTSUP;
- }
- return 0;
- }
+ if (lio_dev->port_configured)
+ return lio_reconf_queues(eth_dev,
+ eth_dev->data->nb_tx_queues,
+ eth_dev->data->nb_rx_queues);
lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;
+	/* Set the max number of queues that can be reconfigured. */
+ lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;
+ lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;
+
resp_size = sizeof(struct lio_if_cfg_resp);
sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
if (sc == NULL)
@@ -1879,9 +1891,6 @@ static int lio_dev_configure(struct rte_eth_dev *eth_dev)
lio_free_soft_command(sc);
- /* Disable iq_0 for reconf */
- lio_dev->fn_list.disable_io_queues(lio_dev);
-
/* Reset ioq regs */
lio_dev->fn_list.setup_device_regs(lio_dev);
@@ -2021,11 +2030,6 @@ lio_first_time_init(struct lio_device *lio_dev,
rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
}
- if (cn23xx_vf_set_io_queues_off(lio_dev)) {
- lio_dev_err(lio_dev, "Setting io queues off failed\n");
- goto error;
- }
-
if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
lio_dev_err(lio_dev, "Failed to configure device registers\n");
goto error;
@@ -2180,3 +2184,15 @@ static struct rte_pci_driver rte_liovf_pmd = {
RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
+
+RTE_INIT(lio_init_log);
+static void
+lio_init_log(void)
+{
+ lio_logtype_init = rte_log_register("pmd.net.liquidio.init");
+ if (lio_logtype_init >= 0)
+ rte_log_set_level(lio_logtype_init, RTE_LOG_NOTICE);
+ lio_logtype_driver = rte_log_register("pmd.net.liquidio.driver");
+ if (lio_logtype_driver >= 0)
+ rte_log_set_level(lio_logtype_driver, RTE_LOG_NOTICE);
+}
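The lio_dev_configure()/lio_reconf_queues() path above turns a changed Tx/Rx queue count into a supported reconfiguration (with firmware >= LIO_Q_RECONF_MIN_VERSION) instead of an -ENOTSUP error. A sketch of the application-side sequence that now works, using only generic ethdev calls; the helper name, descriptor counts, and parameters are illustrative:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Illustrative runtime queue-count change on an already configured port.
 * The rte_eth_dev_configure() call is what lands in lio_dev_configure()
 * and, on a reconfigure, in lio_reconf_queues() above.
 */
static int
reconfigure_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq,
		 const struct rte_eth_conf *conf, struct rte_mempool *mp)
{
	uint16_t q;
	int ret;

	rte_eth_dev_stop(port_id);

	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, conf);
	if (ret < 0)
		return ret;

	for (q = 0; q < nb_rxq; q++) {
		ret = rte_eth_rx_queue_setup(port_id, q, 512,
				rte_eth_dev_socket_id(port_id), NULL, mp);
		if (ret < 0)
			return ret;
	}
	for (q = 0; q < nb_txq; q++) {
		ret = rte_eth_tx_queue_setup(port_id, q, 512,
				rte_eth_dev_socket_id(port_id), NULL);
		if (ret < 0)
			return ret;
	}

	return rte_eth_dev_start(port_id);
}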
diff --git a/drivers/net/liquidio/lio_ethdev.h b/drivers/net/liquidio/lio_ethdev.h
index 655c2011..74cd2fb6 100644
--- a/drivers/net/liquidio/lio_ethdev.h
+++ b/drivers/net/liquidio/lio_ethdev.h
@@ -1,34 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef _LIO_ETHDEV_H_
diff --git a/drivers/net/liquidio/lio_logs.h b/drivers/net/liquidio/lio_logs.h
index a4c9ca4d..f2278270 100644
--- a/drivers/net/liquidio/lio_logs.h
+++ b/drivers/net/liquidio/lio_logs.h
@@ -1,41 +1,14 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef _LIO_LOGS_H_
#define _LIO_LOGS_H_
-#define lio_dev_printf(lio_dev, level, fmt, args...) \
- RTE_LOG(level, PMD, "%s" fmt, (lio_dev)->dev_string, ##args)
+extern int lio_logtype_driver;
+#define lio_dev_printf(lio_dev, level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, lio_logtype_driver, \
+ "%s" fmt, (lio_dev)->dev_string, ##args)
#define lio_dev_info(lio_dev, fmt, args...) \
lio_dev_printf(lio_dev, INFO, "INFO: " fmt, ##args)
@@ -43,22 +16,16 @@
#define lio_dev_err(lio_dev, fmt, args...) \
lio_dev_printf(lio_dev, ERR, "ERROR: %s() " fmt, __func__, ##args)
-#define PMD_INIT_LOG(level, fmt, args...) RTE_LOG(level, PMD, fmt, ## args)
+extern int lio_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, lio_logtype_init, \
+ fmt, ## args)
/* Enable these through config options */
-
-#ifdef RTE_LIBRTE_LIO_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, "%s() >>\n", __func__)
-#else /* !RTE_LIBRTE_LIO_DEBUG_INIT */
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif /* RTE_LIBRTE_LIO_DEBUG_INIT */
-#ifdef RTE_LIBRTE_LIO_DEBUG_DRIVER
#define lio_dev_dbg(lio_dev, fmt, args...) \
lio_dev_printf(lio_dev, DEBUG, "DEBUG: %s() " fmt, __func__, ##args)
-#else /* !RTE_LIBRTE_LIO_DEBUG_DRIVER */
-#define lio_dev_dbg(lio_dev, fmt, args...) do { } while (0)
-#endif /* RTE_LIBRTE_LIO_DEBUG_DRIVER */
#ifdef RTE_LIBRTE_LIO_DEBUG_RX
#define PMD_RX_LOG(lio_dev, level, fmt, args...) \
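The macros above now route through the dynamic log framework instead of the compile-time RTE_LIBRTE_LIO_DEBUG_* options, so lio_dev_dbg() and PMD_INIT_FUNC_TRACE() are always compiled in and gated only by the runtime log level. A hedged sketch of raising that level from an application; it relies on the documented behaviour that rte_log_register() returns the existing id for an already registered name (the name matches the RTE_INIT registration in lio_ethdev.c):

#include <rte_log.h>

/* Illustrative: switch the liquidio driver logs to DEBUG at runtime,
 * after the PMD has registered its logtypes.
 */
static void
lio_logs_verbose(void)
{
	int logtype = rte_log_register("pmd.net.liquidio.driver");

	if (logtype >= 0)
		rte_log_set_level(logtype, RTE_LOG_DEBUG);
}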
diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
index 376893ac..8d705bfe 100644
--- a/drivers/net/liquidio/lio_rxtx.c
+++ b/drivers/net/liquidio/lio_rxtx.c
@@ -1,37 +1,8 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
@@ -42,7 +13,7 @@
#define LIO_MAX_SG 12
/* Flush iq if available tx_desc fall below LIO_FLUSH_WM */
-#define LIO_FLUSH_WM(_iq) ((_iq)->max_count / 2)
+#define LIO_FLUSH_WM(_iq) ((_iq)->nb_desc / 2)
#define LIO_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL
static void
@@ -70,7 +41,7 @@ lio_droq_destroy_ring_buffers(struct lio_droq *droq)
{
uint32_t i;
- for (i = 0; i < droq->max_count; i++) {
+ for (i = 0; i < droq->nb_desc; i++) {
if (droq->recv_buf_list[i].buffer) {
rte_pktmbuf_free((struct rte_mbuf *)
droq->recv_buf_list[i].buffer);
@@ -89,7 +60,7 @@ lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
uint32_t i;
void *buf;
- for (i = 0; i < droq->max_count; i++) {
+ for (i = 0; i < droq->nb_desc; i++) {
buf = rte_pktmbuf_alloc(droq->mpool);
if (buf == NULL) {
lio_dev_err(lio_dev, "buffer alloc failed\n");
@@ -164,7 +135,7 @@ lio_alloc_info_buffer(struct lio_device *lio_dev,
{
droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
"info_list", droq->q_no,
- (droq->max_count *
+ (droq->nb_desc *
LIO_DROQ_INFO_SIZE),
RTE_CACHE_LINE_SIZE,
socket_id);
@@ -206,10 +177,10 @@ lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);
- droq->max_count = num_descs;
+ droq->nb_desc = num_descs;
droq->buffer_size = desc_size;
- desc_ring_size = droq->max_count * LIO_DROQ_DESC_SIZE;
+ desc_ring_size = droq->nb_desc * LIO_DROQ_DESC_SIZE;
droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
"droq", q_no,
desc_ring_size,
@@ -228,7 +199,7 @@ lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
- droq->max_count);
+ droq->nb_desc);
droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
if (droq->info_list == NULL) {
@@ -237,7 +208,7 @@ lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
}
droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
- (droq->max_count *
+ (droq->nb_desc *
LIO_DROQ_RECVBUF_SIZE),
RTE_CACHE_LINE_SIZE,
socket_id);
@@ -274,11 +245,6 @@ lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
PMD_INIT_FUNC_TRACE();
- if (lio_dev->droq[oq_no]) {
- lio_dev_dbg(lio_dev, "Droq %d in use\n", oq_no);
- return 0;
- }
-
/* Allocate the DS for the new droq. */
droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
RTE_CACHE_LINE_SIZE, socket_id);
@@ -303,7 +269,7 @@ lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
/* Send credit for octeon output queues. credits are always
* sent after the output queue is enabled.
*/
- rte_write32(lio_dev->droq[oq_no]->max_count,
+ rte_write32(lio_dev->droq[oq_no]->nb_desc,
lio_dev->droq[oq_no]->pkts_credit_reg);
rte_wmb();
@@ -342,13 +308,13 @@ lio_droq_refill_pullup_descs(struct lio_droq *droq,
do {
droq->refill_idx = lio_incr_index(
droq->refill_idx, 1,
- droq->max_count);
+ droq->nb_desc);
desc_refilled++;
droq->refill_count--;
} while (droq->recv_buf_list[droq->refill_idx].buffer);
}
refill_index = lio_incr_index(refill_index, 1,
- droq->max_count);
+ droq->nb_desc);
} /* while */
return desc_refilled;
@@ -379,7 +345,7 @@ lio_droq_refill(struct lio_droq *droq)
desc_ring = droq->desc_ring;
- while (droq->refill_count && (desc_refilled < droq->max_count)) {
+ while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
/* If a valid buffer exists (happens if there is no dispatch),
* reuse the buffer, else allocate.
*/
@@ -402,7 +368,7 @@ lio_droq_refill(struct lio_droq *droq)
droq->info_list[droq->refill_idx].length = 0;
droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
- droq->max_count);
+ droq->nb_desc);
desc_refilled++;
droq->refill_count--;
}
@@ -449,7 +415,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
buf_cnt = lio_droq_get_bufcount(droq->buffer_size,
(uint32_t)info->length);
droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,
- droq->max_count);
+ droq->nb_desc);
droq->refill_count += buf_cnt;
} else {
if (info->length <= droq->buffer_size) {
@@ -462,7 +428,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
droq->recv_buf_list[droq->read_idx].buffer = NULL;
droq->read_idx = lio_incr_index(
droq->read_idx, 1,
- droq->max_count);
+ droq->nb_desc);
droq->refill_count++;
if (likely(nicbuf != NULL)) {
@@ -556,7 +522,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
pkt_len += cpy_len;
droq->read_idx = lio_incr_index(
droq->read_idx,
- 1, droq->max_count);
+ 1, droq->nb_desc);
droq->refill_count++;
/* Prefetch buffer pointers when on a
@@ -737,7 +703,7 @@ lio_init_instr_queue(struct lio_device *lio_dev,
iq->base_addr_dma = iq->iq_mz->iova;
iq->base_addr = (uint8_t *)iq->iq_mz->addr;
- iq->max_count = num_descs;
+ iq->nb_desc = num_descs;
 	/* Initialize a list to hold requests that have been posted to Octeon
 	 * but have yet to be fetched by Octeon
@@ -756,7 +722,7 @@ lio_init_instr_queue(struct lio_device *lio_dev,
lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
- iq->max_count);
+ iq->nb_desc);
iq->lio_dev = lio_dev;
iq->txpciq.txpciq64 = txpciq.txpciq64;
@@ -853,14 +819,6 @@ lio_setup_iq(struct lio_device *lio_dev, int q_index,
{
uint32_t iq_no = (uint32_t)txpciq.s.q_no;
- if (lio_dev->instr_queue[iq_no]) {
- lio_dev_dbg(lio_dev, "IQ is in use. Cannot create the IQ: %d again\n",
- iq_no);
- lio_dev->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
- lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
- return 0;
- }
-
lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue",
sizeof(struct lio_instr_queue),
RTE_CACHE_LINE_SIZE, socket_id);
@@ -870,23 +828,15 @@ lio_setup_iq(struct lio_device *lio_dev, int q_index,
lio_dev->instr_queue[iq_no]->q_index = q_index;
lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
- if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id))
- goto release_lio_iq;
+ if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id)) {
+ rte_free(lio_dev->instr_queue[iq_no]);
+ lio_dev->instr_queue[iq_no] = NULL;
+ return -1;
+ }
lio_dev->num_iqs++;
- if (lio_dev->fn_list.enable_io_queues(lio_dev))
- goto delete_lio_iq;
return 0;
-
-delete_lio_iq:
- lio_delete_instr_queue(lio_dev, iq_no);
- lio_dev->num_iqs--;
-release_lio_iq:
- rte_free(lio_dev->instr_queue[iq_no]);
- lio_dev->instr_queue[iq_no] = NULL;
-
- return -1;
}
int
@@ -957,14 +907,14 @@ post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
* position if queue gets full before Octeon could fetch any instr.
*/
if (rte_atomic64_read(&iq->instr_pending) >=
- (int32_t)(iq->max_count - 1)) {
+ (int32_t)(iq->nb_desc - 1)) {
st.status = LIO_IQ_SEND_FAILED;
st.index = -1;
return st;
}
if (rte_atomic64_read(&iq->instr_pending) >=
- (int32_t)(iq->max_count - 2))
+ (int32_t)(iq->nb_desc - 2))
st.status = LIO_IQ_SEND_STOP;
copy_cmd_into_iq(iq, cmd);
@@ -972,7 +922,7 @@ post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
/* "index" is returned, host_write_index is modified. */
st.index = iq->host_write_index;
iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
- iq->max_count);
+ iq->nb_desc);
iq->fill_cnt++;
/* Flush the command into memory. We need to be sure the data is in
@@ -1074,7 +1024,7 @@ lio_process_iq_request_list(struct lio_device *lio_dev,
skip_this:
inst_count++;
- old = lio_incr_index(old, 1, iq->max_count);
+ old = lio_incr_index(old, 1, iq->nb_desc);
}
iq->flush_index = old;
@@ -1094,7 +1044,7 @@ lio_update_read_index(struct lio_instr_queue *iq)
/* Add last_done and take the result modulo the IQ size to get the new index */
iq->lio_read_index = (iq->lio_read_index +
(uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) %
- iq->max_count;
+ iq->nb_desc;
}
int
@@ -1552,7 +1502,7 @@ lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)
static inline uint32_t
lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no)
{
- return ((lio_dev->instr_queue[q_no]->max_count - 1) -
+ return ((lio_dev->instr_queue[q_no]->nb_desc - 1) -
(uint32_t)rte_atomic64_read(
&lio_dev->instr_queue[q_no]->instr_pending));
}
@@ -1562,7 +1512,7 @@ lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no)
{
return ((uint32_t)rte_atomic64_read(
&lio_dev->instr_queue[q_no]->instr_pending) >=
- (lio_dev->instr_queue[q_no]->max_count - 2));
+ (lio_dev->instr_queue[q_no]->nb_desc - 2));
}
static int
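The two inline helpers above encode the ring's capacity rules: with nb_desc slots, at most nb_desc - 1 instructions may be pending (one slot stays free so a full ring is distinguishable from an empty one), and post_command2() signals LIO_IQ_SEND_STOP one entry earlier so callers stop posting before the queue actually fills. A standalone restatement of that arithmetic, as a sketch rather than part of the patch:

/* Free slots left in an instruction queue of nb_desc entries. */
static inline uint32_t
iq_free_slots(uint32_t nb_desc, uint32_t pending)
{
	return (nb_desc - 1) - pending;  /* mirrors lio_iq_get_available() */
}

/* Non-zero once posting must stop. */
static inline int
iq_full(uint32_t nb_desc, uint32_t pending)
{
	return pending >= (nb_desc - 2); /* mirrors lio_iq_is_full() */
}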
diff --git a/drivers/net/liquidio/lio_rxtx.h b/drivers/net/liquidio/lio_rxtx.h
index ef033735..d2a45104 100644
--- a/drivers/net/liquidio/lio_rxtx.h
+++ b/drivers/net/liquidio/lio_rxtx.h
@@ -1,34 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef _LIO_RXTX_H_
diff --git a/drivers/net/liquidio/lio_struct.h b/drivers/net/liquidio/lio_struct.h
index 10e3976a..10270c56 100644
--- a/drivers/net/liquidio/lio_struct.h
+++ b/drivers/net/liquidio/lio_struct.h
@@ -1,34 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef _LIO_STRUCT_H_
@@ -131,7 +102,7 @@ struct lio_droq {
rte_atomic64_t pkts_pending;
/** Number of descriptors in this ring. */
- uint32_t max_count;
+ uint32_t nb_desc;
/** The number of descriptors pending refill. */
uint32_t refill_count;
@@ -298,8 +269,8 @@ struct lio_instr_queue {
uint32_t status:8;
- /** Maximum no. of instructions in this queue. */
- uint32_t max_count;
+ /** Number of descriptors in this ring. */
+ uint32_t nb_desc;
/** Index in input ring where the driver should write the next packet */
uint32_t host_write_index;
diff --git a/drivers/net/meson.build b/drivers/net/meson.build
new file mode 100644
index 00000000..704cbe3c
--- /dev/null
+++ b/drivers/net/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+drivers = ['af_packet', 'bonding',
+ 'e1000', 'fm10k', 'i40e', 'ixgbe',
+ 'null', 'octeontx', 'pcap', 'ring',
+ 'sfc', 'thunderx']
+std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
+std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std
+std_deps += ['bus_vdev'] # same with vdev bus
+config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index f1f47c28..cc800493 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -33,11 +33,17 @@ include $(RTE_SDK)/mk/rte.vars.mk
# Library name.
LIB = librte_pmd_mlx4.a
+LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION)
+LIB_GLUE_BASE = librte_pmd_mlx4_glue.so
+LIB_GLUE_VERSION = 18.02.0
# Sources.
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_flow.c
+ifneq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_glue.c
+endif
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_intr.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_mr.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxq.c
@@ -45,6 +51,10 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_utils.c
+ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
+INSTALL-$(CONFIG_RTE_LIBRTE_MLX4_PMD)-lib += $(LIB_GLUE)
+endif
+
# Basic CFLAGS.
CFLAGS += -O3
CFLAGS += -std=c11 -Wall -Wextra
@@ -54,7 +64,14 @@ CFLAGS += -D_BSD_SOURCE
CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
+ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
+CFLAGS += -DMLX4_GLUE='"$(LIB_GLUE)"'
+CFLAGS += -DMLX4_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
+CFLAGS_mlx4_glue.o += -fPIC
+LDLIBS += -ldl
+else
LDLIBS += -libverbs -lmlx4
+endif
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
@@ -82,10 +99,6 @@ ifdef CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE
CFLAGS += -DMLX4_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE)
endif
-ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS),y)
-CFLAGS += -DMLX4_PMD_DEBUG_BROKEN_VERBS
-endif
-
include $(RTE_SDK)/mk/rte.lib.mk
# Generate and clean-up mlx4_autoconf.h.
@@ -112,7 +125,24 @@ mlx4_autoconf.h: mlx4_autoconf.h.new
$(SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD):.c=.o): mlx4_autoconf.h
+# Generate dependency plug-in for rdma-core when the PMD must not be linked
+# directly, so that applications do not inherit this dependency.
+
+ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
+
+$(LIB): $(LIB_GLUE)
+
+$(LIB_GLUE): mlx4_glue.o
+ $Q $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) \
+ -Wl,-h,$(LIB_GLUE) \
+ -s -shared -o $@ $< -libverbs -lmlx4
+
+mlx4_glue.o: mlx4_autoconf.h
+
+endif
+
clean_mlx4: FORCE
$Q rm -f -- mlx4_autoconf.h mlx4_autoconf.h.new
+ $Q rm -f -- mlx4_glue.o $(LIB_GLUE_BASE)*
clean: clean_mlx4
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index f9e4f9d7..ee93dafe 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2012 6WIND S.A.
- * Copyright 2012 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2012 6WIND S.A.
+ * Copyright 2012 Mellanox
*/
/**
@@ -37,6 +9,7 @@
*/
#include <assert.h>
+#include <dlfcn.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
@@ -44,6 +17,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <unistd.h>
/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
@@ -55,9 +29,10 @@
#endif
#include <rte_common.h>
+#include <rte_config.h>
#include <rte_dev.h>
#include <rte_errno.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_ether.h>
#include <rte_flow.h>
@@ -67,6 +42,7 @@
#include <rte_mbuf.h>
#include "mlx4.h"
+#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
@@ -108,7 +84,13 @@ mlx4_dev_configure(struct rte_eth_dev *dev)
" flow error type %d, cause %p, message: %s",
-ret, strerror(-ret), error.type, error.cause,
error.message ? error.message : "(unspecified)");
+ goto exit;
}
+ ret = mlx4_intr_install(priv);
+ if (ret)
+ ERROR("%p: interrupt handler installation failed",
+ (void *)dev);
+exit:
return ret;
}
@@ -141,7 +123,7 @@ mlx4_dev_start(struct rte_eth_dev *dev)
(void *)dev, strerror(-ret));
goto err;
}
- ret = mlx4_intr_install(priv);
+ ret = mlx4_rxq_intr_enable(priv);
if (ret) {
ERROR("%p: interrupt handler installation failed",
(void *)dev);
@@ -187,7 +169,7 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
dev->rx_pkt_burst = mlx4_rx_burst_removed;
rte_wmb();
mlx4_flow_sync(priv, NULL);
- mlx4_intr_uninstall(priv);
+ mlx4_rxq_intr_disable(priv);
mlx4_rss_deinit(priv);
}
@@ -218,8 +200,8 @@ mlx4_dev_close(struct rte_eth_dev *dev)
mlx4_tx_queue_release(dev->data->tx_queues[i]);
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
- claim_zero(ibv_dealloc_pd(priv->pd));
- claim_zero(ibv_close_device(priv->ctx));
+ claim_zero(mlx4_glue->dealloc_pd(priv->pd));
+ claim_zero(mlx4_glue->close_device(priv->ctx));
} else
assert(priv->ctx == NULL);
mlx4_intr_uninstall(priv);
@@ -256,6 +238,7 @@ static const struct eth_dev_ops mlx4_dev_ops = {
.filter_ctrl = mlx4_filter_ctrl,
.rx_queue_intr_enable = mlx4_rx_intr_enable,
.rx_queue_intr_disable = mlx4_rx_intr_disable,
+ .is_removed = mlx4_is_removed,
};
/**
@@ -336,7 +319,7 @@ mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
return -rte_errno;
}
if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
- uint32_t ports = rte_log2_u32(conf->ports.present);
+ uint32_t ports = rte_log2_u32(conf->ports.present + 1);
if (tmp >= ports) {
ERROR("port index %lu outside range [0,%" PRIu32 ")",
@@ -426,6 +409,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
int err = 0;
struct ibv_context *attr_ctx = NULL;
struct ibv_device_attr device_attr;
+ struct ibv_device_attr_ex device_attr_ex;
struct mlx4_conf conf = {
.ports.present = 0,
};
@@ -434,7 +418,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
(void)pci_drv;
assert(pci_drv == &mlx4_driver);
- list = ibv_get_device_list(&i);
+ list = mlx4_glue->get_device_list(&i);
if (list == NULL) {
rte_errno = errno;
assert(rte_errno);
@@ -463,12 +447,12 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
INFO("PCI information matches, using device \"%s\" (VF: %s)",
list[i]->name, (vf ? "true" : "false"));
- attr_ctx = ibv_open_device(list[i]);
+ attr_ctx = mlx4_glue->open_device(list[i]);
err = errno;
break;
}
if (attr_ctx == NULL) {
- ibv_free_device_list(list);
+ mlx4_glue->free_device_list(list);
switch (err) {
case 0:
rte_errno = ENODEV;
@@ -485,7 +469,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
}
ibv_dev = list[i];
DEBUG("device opened");
- if (ibv_query_device(attr_ctx, &device_attr)) {
+ if (mlx4_glue->query_device(attr_ctx, &device_attr)) {
rte_errno = ENODEV;
goto error;
}
@@ -499,6 +483,12 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
/* Use all ports when none are defined */
if (!conf.ports.enabled)
conf.ports.enabled = conf.ports.present;
+ /* Retrieve extended device attributes. */
+ if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
+ rte_errno = ENODEV;
+ goto error;
+ }
+ assert(device_attr.max_sge >= MLX4_MAX_SGE);
for (i = 0; i < device_attr.phys_port_cnt; i++) {
uint32_t port = i + 1; /* ports are indexed from one */
struct ibv_context *ctx = NULL;
@@ -512,13 +502,13 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (!(conf.ports.enabled & (1 << i)))
continue;
DEBUG("using port %u", port);
- ctx = ibv_open_device(ibv_dev);
+ ctx = mlx4_glue->open_device(ibv_dev);
if (ctx == NULL) {
rte_errno = ENODEV;
goto port_error;
}
/* Check port status. */
- err = ibv_query_port(ctx, port, &port_attr);
+ err = mlx4_glue->query_port(ctx, port, &port_attr);
if (err) {
rte_errno = err;
ERROR("port query failed: %s", strerror(rte_errno));
@@ -532,7 +522,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
}
if (port_attr.state != IBV_PORT_ACTIVE)
DEBUG("port %d is not active: \"%s\" (%d)",
- port, ibv_port_state_str(port_attr.state),
+ port, mlx4_glue->port_state_str(port_attr.state),
port_attr.state);
/* Make asynchronous FD non-blocking to handle interrupts. */
if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
@@ -541,7 +531,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
goto port_error;
}
/* Allocate protection domain. */
- pd = ibv_alloc_pd(ctx);
+ pd = mlx4_glue->alloc_pd(ctx);
if (pd == NULL) {
rte_errno = ENOMEM;
ERROR("PD allocation failure");
@@ -573,6 +563,21 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
DEBUG("L2 tunnel checksum offloads are %ssupported",
(priv->hw_csum_l2tun ? "" : "not "));
+ priv->hw_rss_sup = device_attr_ex.rss_caps.rx_hash_fields_mask;
+ if (!priv->hw_rss_sup) {
+ WARN("no RSS capabilities reported; disabling support"
+ " for UDP RSS and inner VXLAN RSS");
+ /* Fake support for all possible RSS hash fields. */
+ priv->hw_rss_sup = ~UINT64_C(0);
+ priv->hw_rss_sup = mlx4_conv_rss_hf(priv, -1);
+ /* Filter out known unsupported fields. */
+ priv->hw_rss_sup &=
+ ~(uint64_t)(IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP |
+ IBV_RX_HASH_INNER);
+ }
+ DEBUG("supported RSS hash fields mask: %016" PRIx64,
+ priv->hw_rss_sup);
/* Configure the first MAC address by default. */
if (mlx4_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx4_en loaded?"
@@ -605,7 +610,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
char name[RTE_ETH_NAME_MAX_LEN];
snprintf(name, sizeof(name), "%s port %u",
- ibv_get_device_name(ibv_dev), port);
+ mlx4_glue->get_device_name(ibv_dev), port);
eth_dev = rte_eth_dev_allocate(name);
}
if (eth_dev == NULL) {
@@ -648,9 +653,9 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
port_error:
rte_free(priv);
if (pd)
- claim_zero(ibv_dealloc_pd(pd));
+ claim_zero(mlx4_glue->dealloc_pd(pd));
if (ctx)
- claim_zero(ibv_close_device(ctx));
+ claim_zero(mlx4_glue->close_device(ctx));
if (eth_dev)
rte_eth_dev_release_port(eth_dev);
break;
@@ -665,9 +670,9 @@ port_error:
*/
error:
if (attr_ctx)
- claim_zero(ibv_close_device(attr_ctx));
+ claim_zero(mlx4_glue->close_device(attr_ctx));
if (list)
- ibv_free_device_list(list);
+ mlx4_glue->free_device_list(list);
assert(rte_errno >= 0);
return -rte_errno;
}
@@ -700,6 +705,88 @@ static struct rte_pci_driver mlx4_driver = {
RTE_PCI_DRV_INTR_RMV,
};
+#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS
+
+/**
+ * Initialization routine for run-time dependency on rdma-core.
+ */
+static int
+mlx4_glue_init(void)
+{
+ const char *path[] = {
+ /*
+ * A basic security check is necessary before trusting
+ * MLX4_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
+ */
+ (geteuid() == getuid() && getegid() == getgid() ?
+ getenv("MLX4_GLUE_PATH") : NULL),
+ RTE_EAL_PMD_PATH,
+ };
+ unsigned int i = 0;
+ void *handle = NULL;
+ void **sym;
+ const char *dlmsg;
+
+ while (!handle && i != RTE_DIM(path)) {
+ const char *end;
+ size_t len;
+ int ret;
+
+ if (!path[i]) {
+ ++i;
+ continue;
+ }
+ end = strpbrk(path[i], ":;");
+ if (!end)
+ end = path[i] + strlen(path[i]);
+ len = end - path[i];
+ ret = 0;
+ do {
+ char name[ret + 1];
+
+ ret = snprintf(name, sizeof(name), "%.*s%s" MLX4_GLUE,
+ (int)len, path[i],
+ (!len || *(end - 1) == '/') ? "" : "/");
+ if (ret == -1)
+ break;
+ if (sizeof(name) != (size_t)ret + 1)
+ continue;
+ DEBUG("looking for rdma-core glue as \"%s\"", name);
+ handle = dlopen(name, RTLD_LAZY);
+ break;
+ } while (1);
+ path[i] = end + 1;
+ if (!*end)
+ ++i;
+ }
+ if (!handle) {
+ rte_errno = EINVAL;
+ dlmsg = dlerror();
+ if (dlmsg)
+ WARN("cannot load glue library: %s", dlmsg);
+ goto glue_error;
+ }
+ sym = dlsym(handle, "mlx4_glue");
+ if (!sym || !*sym) {
+ rte_errno = EINVAL;
+ dlmsg = dlerror();
+ if (dlmsg)
+ ERROR("cannot resolve glue symbol: %s", dlmsg);
+ goto glue_error;
+ }
+ mlx4_glue = *sym;
+ return 0;
+glue_error:
+ if (handle)
+ dlclose(handle);
+ WARN("cannot initialize PMD due to missing run-time"
+ " dependency on rdma-core libraries (libibverbs,"
+ " libmlx4)");
+ return -rte_errno;
+}
+
+#endif
+
/**
* Driver initialization routine.
*/
@@ -708,13 +795,38 @@ static void
rte_mlx4_pmd_init(void)
{
/*
+ * MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
+ * return success when they are called after the device has
+ * been removed.
+ */
+ setenv("MLX4_DEVICE_FATAL_CLEANUP", "1", 1);
+ /*
* RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
* huge pages. Calling ibv_fork_init() during init allows
* applications to use fork() safely for purposes other than
* using this PMD, which is not supported in forked processes.
*/
setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
- ibv_fork_init();
+#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS
+ if (mlx4_glue_init())
+ return;
+ assert(mlx4_glue);
+#endif
+#ifndef NDEBUG
+ /* Glue structure must not contain any NULL pointers. */
+ {
+ unsigned int i;
+
+ for (i = 0; i != sizeof(*mlx4_glue) / sizeof(void *); ++i)
+ assert(((const void *const *)mlx4_glue)[i]);
+ }
+#endif
+ if (strcmp(mlx4_glue->version, MLX4_GLUE_VERSION)) {
+ ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required",
+ mlx4_glue->version, MLX4_GLUE_VERSION);
+ return;
+ }
+ mlx4_glue->fork_init();
rte_pci_register(&mlx4_driver);
}
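Stripped of the MLX4_GLUE_PATH search loop, the run-time dependency mechanism added above reduces to a plain dlopen()/dlsym() handshake. A minimal self-contained sketch (everything except the exported "mlx4_glue" symbol name is illustrative):

#include <dlfcn.h>
#include <stdio.h>

struct mlx4_glue; /* opaque here; defined in mlx4_glue.h */

static const struct mlx4_glue *
load_glue(const char *path)
{
	void *handle = dlopen(path, RTLD_LAZY);
	void **sym;

	if (handle == NULL) {
		fprintf(stderr, "dlopen: %s\n", dlerror());
		return NULL;
	}
	/* The glue object exports a single pointer named "mlx4_glue". */
	sym = dlsym(handle, "mlx4_glue");
	if (sym == NULL || *sym == NULL) {
		dlclose(handle);
		return NULL;
	}
	return *sym;
}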
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 3aeef87e..19c8a223 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2012 6WIND S.A.
- * Copyright 2012 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2012 6WIND S.A.
+ * Copyright 2012 Mellanox
*/
#ifndef RTE_PMD_MLX4_H_
@@ -47,12 +19,17 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_interrupts.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
+#ifndef IBV_RX_HASH_INNER
+/** This is not necessarily defined by supported RDMA core versions. */
+#define IBV_RX_HASH_INNER (1ull << 31)
+#endif /* IBV_RX_HASH_INNER */
+
/** Maximum number of simultaneous MAC addresses. This value is arbitrary. */
#define MLX4_MAX_MAC_ADDRESSES 128
@@ -126,8 +103,9 @@ struct priv {
uint32_t vf:1; /**< This is a VF device. */
uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */
uint32_t isolated:1; /**< Toggle isolated mode. */
- uint32_t hw_csum:1; /* Checksum offload is supported. */
- uint32_t hw_csum_l2tun:1; /* Checksum support for L2 tunnels. */
+ uint32_t hw_csum:1; /**< Checksum offload is supported. */
+ uint32_t hw_csum_l2tun:1; /**< Checksum support for L2 tunnels. */
+ uint64_t hw_rss_sup; /**< Supported RSS hash fields (Verbs format). */
struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */
@@ -165,11 +143,14 @@ int mlx4_flow_ctrl_get(struct rte_eth_dev *dev,
int mlx4_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int mlx4_is_removed(struct rte_eth_dev *dev);
/* mlx4_intr.c */
int mlx4_intr_uninstall(struct priv *priv);
int mlx4_intr_install(struct priv *priv);
+int mlx4_rxq_intr_enable(struct priv *priv);
+void mlx4_rxq_intr_disable(struct priv *priv);
int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 2f69e7d4..3bc69273 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
/**
@@ -63,13 +35,14 @@
#include <rte_bus_pci.h>
#include <rte_errno.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_pci.h>
#include "mlx4.h"
#include "mlx4_flow.h"
+#include "mlx4_glue.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
@@ -766,18 +739,10 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_offload_capa = 0;
- info->tx_offload_capa = 0;
- if (priv->hw_csum) {
- info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
- info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
- }
- if (priv->hw_csum_l2tun)
- info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ info->tx_offload_capa = mlx4_get_tx_port_offloads(priv);
+ info->rx_queue_offload_capa = mlx4_get_rx_queue_offloads(priv);
+ info->rx_offload_capa = (mlx4_get_rx_port_offloads(priv) |
+ info->rx_queue_offload_capa);
if (mlx4_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
@@ -1060,3 +1025,23 @@ mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
}
return NULL;
}
+
+/**
+ * Check if mlx4 device was removed.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 1 when device is removed, otherwise 0.
+ */
+int
+mlx4_is_removed(struct rte_eth_dev *dev)
+{
+ struct ibv_device_attr device_attr;
+ struct priv *priv = dev->data->dev_private;
+
+ if (mlx4_glue->query_device(priv->ctx, &device_attr) == EIO)
+ return 1;
+ return 0;
+}
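ibv_query_device() returning EIO is what signals that the underlying device has gone away. Applications reach this probe through the new .is_removed operation; a hedged usage sketch via the ethdev wrapper introduced alongside it:

#include <rte_ethdev.h>

/* Poll a port and stop using it once hot-unplug is detected. */
static int
port_gone(uint16_t port_id)
{
	return rte_eth_dev_is_removed(port_id); /* 1 if removed, else 0 */
}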
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 8b87b298..2d55bfe0 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
/**
@@ -57,7 +29,7 @@
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_eth_ctrl.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
@@ -65,6 +37,7 @@
/* PMD headers. */
#include "mlx4.h"
+#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
@@ -105,6 +78,11 @@ struct mlx4_drop {
/**
* Convert DPDK RSS hash fields to their Verbs equivalent.
*
+ * This function returns the supported (default) set when @p rss_hf has
+ * the special value (uint64_t)-1.
+ *
+ * @param priv
+ * Pointer to private structure.
* @param rss_hf
* Hash fields in DPDK format (see struct rte_eth_rss_conf).
*
@@ -112,8 +90,8 @@ struct mlx4_drop {
* A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1
* otherwise and rte_errno is set.
*/
-static uint64_t
-mlx4_conv_rss_hf(uint64_t rss_hf)
+uint64_t
+mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf)
{
enum { IPV4, IPV6, TCP, UDP, };
const uint64_t in[] = {
@@ -133,11 +111,9 @@ mlx4_conv_rss_hf(uint64_t rss_hf)
[TCP] = (ETH_RSS_NONFRAG_IPV4_TCP |
ETH_RSS_NONFRAG_IPV6_TCP |
ETH_RSS_IPV6_TCP_EX),
- /*
- * UDP support is temporarily disabled due to an
- * implementation issue in the kernel.
- */
- [UDP] = 0,
+ [UDP] = (ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX),
};
const uint64_t out[RTE_DIM(in)] = {
[IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
@@ -154,8 +130,15 @@ mlx4_conv_rss_hf(uint64_t rss_hf)
seen |= rss_hf & in[i];
conv |= out[i];
}
- if (!(rss_hf & ~seen))
- return conv;
+ if ((conv & priv->hw_rss_sup) == conv) {
+ if (rss_hf == (uint64_t)-1) {
+ /* Include inner RSS by default if supported. */
+ conv |= priv->hw_rss_sup & IBV_RX_HASH_INNER;
+ return conv;
+ }
+ if (!(rss_hf & ~seen))
+ return conv;
+ }
rte_errno = ENOTSUP;
return (uint64_t)-1;
}
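With the special value (uint64_t)-1, the function now doubles as a capability query; this is how mlx4_pci_probe() derives priv->hw_rss_sup when the kernel reports no RSS capabilities. A hedged usage sketch (assumes mlx4.h and mlx4_flow.h for struct priv and the prototype):

static uint64_t
default_rss_fields(struct priv *priv)
{
	/* (uint64_t)-1 selects the supported default set, inner RSS
	 * included when the device reports IBV_RX_HASH_INNER.
	 */
	uint64_t fields = mlx4_conv_rss_hf(priv, (uint64_t)-1);

	if (fields == (uint64_t)-1)
		return 0; /* rte_errno is set to ENOTSUP */
	return fields;
}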
@@ -759,10 +742,7 @@ fill:
&(struct rte_eth_rss_conf){
.rss_key = mlx4_rss_hash_key_default,
.rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
- .rss_hf = (ETH_RSS_IPV4 |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_IPV6 |
- ETH_RSS_NONFRAG_IPV6_TCP),
+ .rss_hf = -1,
};
/* Sanity checks. */
for (i = 0; i < rss->num; ++i)
@@ -801,7 +781,8 @@ fill:
goto exit_action_not_supported;
}
flow->rss = mlx4_rss_get
- (priv, mlx4_conv_rss_hf(rss_conf->rss_hf),
+ (priv,
+ mlx4_conv_rss_hf(priv, rss_conf->rss_hf),
rss_conf->rss_key, rss->num, rss->queue);
if (!flow->rss) {
msg = "either invalid parameters or not enough"
@@ -914,24 +895,25 @@ mlx4_drop_get(struct priv *priv)
.priv = priv,
.refcnt = 1,
};
- drop->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
+ drop->cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!drop->cq)
goto error;
- drop->qp = ibv_create_qp(priv->pd,
- &(struct ibv_qp_init_attr){
- .send_cq = drop->cq,
- .recv_cq = drop->cq,
- .qp_type = IBV_QPT_RAW_PACKET,
- });
+ drop->qp = mlx4_glue->create_qp
+ (priv->pd,
+ &(struct ibv_qp_init_attr){
+ .send_cq = drop->cq,
+ .recv_cq = drop->cq,
+ .qp_type = IBV_QPT_RAW_PACKET,
+ });
if (!drop->qp)
goto error;
priv->drop = drop;
return drop;
error:
if (drop->qp)
- claim_zero(ibv_destroy_qp(drop->qp));
+ claim_zero(mlx4_glue->destroy_qp(drop->qp));
if (drop->cq)
- claim_zero(ibv_destroy_cq(drop->cq));
+ claim_zero(mlx4_glue->destroy_cq(drop->cq));
if (drop)
rte_free(drop);
rte_errno = ENOMEM;
@@ -951,8 +933,8 @@ mlx4_drop_put(struct mlx4_drop *drop)
if (--drop->refcnt)
return;
drop->priv->drop = NULL;
- claim_zero(ibv_destroy_qp(drop->qp));
- claim_zero(ibv_destroy_cq(drop->cq));
+ claim_zero(mlx4_glue->destroy_qp(drop->qp));
+ claim_zero(mlx4_glue->destroy_cq(drop->cq));
rte_free(drop);
}
@@ -984,7 +966,7 @@ mlx4_flow_toggle(struct priv *priv,
if (!enable) {
if (!flow->ibv_flow)
return 0;
- claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
flow->ibv_flow = NULL;
if (flow->drop)
mlx4_drop_put(priv->drop);
@@ -997,7 +979,7 @@ mlx4_flow_toggle(struct priv *priv,
!priv->isolated &&
flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
if (flow->ibv_flow) {
- claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
flow->ibv_flow = NULL;
if (flow->drop)
mlx4_drop_put(priv->drop);
@@ -1027,7 +1009,7 @@ mlx4_flow_toggle(struct priv *priv,
if (missing ^ !flow->drop)
return 0;
/* Verbs flow needs updating. */
- claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ claim_zero(mlx4_glue->destroy_flow(flow->ibv_flow));
flow->ibv_flow = NULL;
if (flow->drop)
mlx4_drop_put(priv->drop);
@@ -1048,6 +1030,8 @@ mlx4_flow_toggle(struct priv *priv,
flow->drop = missing;
}
if (flow->drop) {
+ if (flow->ibv_flow)
+ return 0;
mlx4_drop_get(priv);
if (!priv->drop) {
err = rte_errno;
@@ -1059,7 +1043,7 @@ mlx4_flow_toggle(struct priv *priv,
assert(qp);
if (flow->ibv_flow)
return 0;
- flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
+ flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
if (flow->ibv_flow)
return 0;
if (flow->drop)
@@ -1215,16 +1199,19 @@ mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
*
* Various flow rules are created depending on the mode the device is in:
*
- * 1. Promiscuous: port MAC + catch-all (VLAN filtering is ignored).
- * 2. All multicast: port MAC/VLAN + catch-all multicast.
- * 3. Otherwise: port MAC/VLAN + broadcast MAC/VLAN.
+ * 1. Promiscuous:
+ * port MAC + broadcast + catch-all (VLAN filtering is ignored).
+ * 2. All multicast:
+ * port MAC/VLAN + broadcast + catch-all multicast.
+ * 3. Otherwise:
+ * port MAC/VLAN + broadcast MAC/VLAN.
*
* About MAC flow rules:
*
* - MAC flow rules are generated from @p dev->data->mac_addrs
* (@p priv->mac array).
* - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
+ * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
* is enabled and VLAN filters are configured.
*
* @param priv
@@ -1292,13 +1279,11 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
};
struct ether_addr *rule_mac = &eth_spec.dst;
rte_be16_t *rule_vlan =
- priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
+ (priv->dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER) &&
!priv->dev->data->promiscuous ?
&vlan_spec.tci :
NULL;
- int broadcast =
- !priv->dev->data->promiscuous &&
- !priv->dev->data->all_multicast;
uint16_t vlan = 0;
struct rte_flow *flow;
unsigned int i;
@@ -1332,7 +1317,7 @@ next_vlan:
rule_vlan = NULL;
}
}
- for (i = 0; i != RTE_DIM(priv->mac) + broadcast; ++i) {
+ for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
const struct ether_addr *mac;
/* Broadcasts are handled by an extra iteration. */
@@ -1396,7 +1381,7 @@ next_vlan:
goto next_vlan;
}
/* Take care of promiscuous and all multicast flow rules. */
- if (!broadcast) {
+ if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
for (flow = LIST_FIRST(&priv->flows);
flow && flow->internal;
flow = LIST_NEXT(flow, next)) {
diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h
index 651fd37b..00188a65 100644
--- a/drivers/net/mlx4/mlx4_flow.h
+++ b/drivers/net/mlx4/mlx4_flow.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
#ifndef RTE_PMD_MLX4_FLOW_H_
@@ -47,7 +19,7 @@
#endif
#include <rte_eth_ctrl.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_byteorder.h>
@@ -75,6 +47,7 @@ struct rte_flow {
/* mlx4_flow.c */
+uint64_t mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf);
int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error);
void mlx4_flow_clean(struct priv *priv);
int mlx4_filter_ctrl(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx4/mlx4_glue.c b/drivers/net/mlx4/mlx4_glue.c
new file mode 100644
index 00000000..3b79d320
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_glue.c
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include "mlx4_glue.h"
+
+static int
+mlx4_glue_fork_init(void)
+{
+ return ibv_fork_init();
+}
+
+static int
+mlx4_glue_get_async_event(struct ibv_context *context,
+ struct ibv_async_event *event)
+{
+ return ibv_get_async_event(context, event);
+}
+
+static void
+mlx4_glue_ack_async_event(struct ibv_async_event *event)
+{
+ ibv_ack_async_event(event);
+}
+
+static struct ibv_pd *
+mlx4_glue_alloc_pd(struct ibv_context *context)
+{
+ return ibv_alloc_pd(context);
+}
+
+static int
+mlx4_glue_dealloc_pd(struct ibv_pd *pd)
+{
+ return ibv_dealloc_pd(pd);
+}
+
+static struct ibv_device **
+mlx4_glue_get_device_list(int *num_devices)
+{
+ return ibv_get_device_list(num_devices);
+}
+
+static void
+mlx4_glue_free_device_list(struct ibv_device **list)
+{
+ ibv_free_device_list(list);
+}
+
+static struct ibv_context *
+mlx4_glue_open_device(struct ibv_device *device)
+{
+ return ibv_open_device(device);
+}
+
+static int
+mlx4_glue_close_device(struct ibv_context *context)
+{
+ return ibv_close_device(context);
+}
+
+static const char *
+mlx4_glue_get_device_name(struct ibv_device *device)
+{
+ return ibv_get_device_name(device);
+}
+
+static int
+mlx4_glue_query_device(struct ibv_context *context,
+ struct ibv_device_attr *device_attr)
+{
+ return ibv_query_device(context, device_attr);
+}
+
+static int
+mlx4_glue_query_device_ex(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr)
+{
+ return ibv_query_device_ex(context, input, attr);
+}
+
+static int
+mlx4_glue_query_port(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr)
+{
+ return ibv_query_port(context, port_num, port_attr);
+}
+
+static const char *
+mlx4_glue_port_state_str(enum ibv_port_state port_state)
+{
+ return ibv_port_state_str(port_state);
+}
+
+static struct ibv_comp_channel *
+mlx4_glue_create_comp_channel(struct ibv_context *context)
+{
+ return ibv_create_comp_channel(context);
+}
+
+static int
+mlx4_glue_destroy_comp_channel(struct ibv_comp_channel *channel)
+{
+ return ibv_destroy_comp_channel(channel);
+}
+
+static struct ibv_cq *
+mlx4_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,
+ struct ibv_comp_channel *channel, int comp_vector)
+{
+ return ibv_create_cq(context, cqe, cq_context, channel, comp_vector);
+}
+
+static int
+mlx4_glue_destroy_cq(struct ibv_cq *cq)
+{
+ return ibv_destroy_cq(cq);
+}
+
+static int
+mlx4_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,
+ void **cq_context)
+{
+ return ibv_get_cq_event(channel, cq, cq_context);
+}
+
+static void
+mlx4_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)
+{
+ ibv_ack_cq_events(cq, nevents);
+}
+
+static struct ibv_flow *
+mlx4_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)
+{
+ return ibv_create_flow(qp, flow);
+}
+
+static int
+mlx4_glue_destroy_flow(struct ibv_flow *flow_id)
+{
+ return ibv_destroy_flow(flow_id);
+}
+
+static struct ibv_qp *
+mlx4_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)
+{
+ return ibv_create_qp(pd, qp_init_attr);
+}
+
+static struct ibv_qp *
+mlx4_glue_create_qp_ex(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex)
+{
+ return ibv_create_qp_ex(context, qp_init_attr_ex);
+}
+
+static int
+mlx4_glue_destroy_qp(struct ibv_qp *qp)
+{
+ return ibv_destroy_qp(qp);
+}
+
+static int
+mlx4_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
+{
+ return ibv_modify_qp(qp, attr, attr_mask);
+}
+
+static struct ibv_mr *
+mlx4_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
+{
+ return ibv_reg_mr(pd, addr, length, access);
+}
+
+static int
+mlx4_glue_dereg_mr(struct ibv_mr *mr)
+{
+ return ibv_dereg_mr(mr);
+}
+
+static struct ibv_rwq_ind_table *
+mlx4_glue_create_rwq_ind_table(struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr)
+{
+ return ibv_create_rwq_ind_table(context, init_attr);
+}
+
+static int
+mlx4_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)
+{
+ return ibv_destroy_rwq_ind_table(rwq_ind_table);
+}
+
+static struct ibv_wq *
+mlx4_glue_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr)
+{
+ return ibv_create_wq(context, wq_init_attr);
+}
+
+static int
+mlx4_glue_destroy_wq(struct ibv_wq *wq)
+{
+ return ibv_destroy_wq(wq);
+}
+
+static int
+mlx4_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)
+{
+ return ibv_modify_wq(wq, wq_attr);
+}
+
+static int
+mlx4_glue_dv_init_obj(struct mlx4dv_obj *obj, uint64_t obj_type)
+{
+ return mlx4dv_init_obj(obj, obj_type);
+}
+
+static int
+mlx4_glue_dv_set_context_attr(struct ibv_context *context,
+ enum mlx4dv_set_ctx_attr_type attr_type,
+ void *attr)
+{
+ return mlx4dv_set_context_attr(context, attr_type, attr);
+}
+
+const struct mlx4_glue *mlx4_glue = &(const struct mlx4_glue){
+ .version = MLX4_GLUE_VERSION,
+ .fork_init = mlx4_glue_fork_init,
+ .get_async_event = mlx4_glue_get_async_event,
+ .ack_async_event = mlx4_glue_ack_async_event,
+ .alloc_pd = mlx4_glue_alloc_pd,
+ .dealloc_pd = mlx4_glue_dealloc_pd,
+ .get_device_list = mlx4_glue_get_device_list,
+ .free_device_list = mlx4_glue_free_device_list,
+ .open_device = mlx4_glue_open_device,
+ .close_device = mlx4_glue_close_device,
+ .get_device_name = mlx4_glue_get_device_name,
+ .query_device = mlx4_glue_query_device,
+ .query_device_ex = mlx4_glue_query_device_ex,
+ .query_port = mlx4_glue_query_port,
+ .port_state_str = mlx4_glue_port_state_str,
+ .create_comp_channel = mlx4_glue_create_comp_channel,
+ .destroy_comp_channel = mlx4_glue_destroy_comp_channel,
+ .create_cq = mlx4_glue_create_cq,
+ .destroy_cq = mlx4_glue_destroy_cq,
+ .get_cq_event = mlx4_glue_get_cq_event,
+ .ack_cq_events = mlx4_glue_ack_cq_events,
+ .create_flow = mlx4_glue_create_flow,
+ .destroy_flow = mlx4_glue_destroy_flow,
+ .create_qp = mlx4_glue_create_qp,
+ .create_qp_ex = mlx4_glue_create_qp_ex,
+ .destroy_qp = mlx4_glue_destroy_qp,
+ .modify_qp = mlx4_glue_modify_qp,
+ .reg_mr = mlx4_glue_reg_mr,
+ .dereg_mr = mlx4_glue_dereg_mr,
+ .create_rwq_ind_table = mlx4_glue_create_rwq_ind_table,
+ .destroy_rwq_ind_table = mlx4_glue_destroy_rwq_ind_table,
+ .create_wq = mlx4_glue_create_wq,
+ .destroy_wq = mlx4_glue_destroy_wq,
+ .modify_wq = mlx4_glue_modify_wq,
+ .dv_init_obj = mlx4_glue_dv_init_obj,
+ .dv_set_context_attr = mlx4_glue_dv_set_context_attr,
+};
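Every entry above follows the same thin-wrapper pattern, so extending the glue is mechanical. A hypothetical example wrapping one more Verbs call (not part of this patch; any such addition also requires bumping LIB_GLUE_VERSION, since the structure layout is the dlopen ABI):

/* Assumes <infiniband/verbs.h> as included above. */
static int
mlx4_glue_resize_cq(struct ibv_cq *cq, int cqe)
{
	return ibv_resize_cq(cq, cqe);
}

/* ...plus the matching slot in the table initializer:
 *	.resize_cq = mlx4_glue_resize_cq,
 */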
diff --git a/drivers/net/mlx4/mlx4_glue.h b/drivers/net/mlx4/mlx4_glue.h
new file mode 100644
index 00000000..368f906b
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_glue.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox
+ */
+
+#ifndef MLX4_GLUE_H_
+#define MLX4_GLUE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx4dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#ifndef MLX4_GLUE_VERSION
+#define MLX4_GLUE_VERSION ""
+#endif
+
+/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
+struct mlx4_glue {
+ const char *version;
+ int (*fork_init)(void);
+ int (*get_async_event)(struct ibv_context *context,
+ struct ibv_async_event *event);
+ void (*ack_async_event)(struct ibv_async_event *event);
+ struct ibv_pd *(*alloc_pd)(struct ibv_context *context);
+ int (*dealloc_pd)(struct ibv_pd *pd);
+ struct ibv_device **(*get_device_list)(int *num_devices);
+ void (*free_device_list)(struct ibv_device **list);
+ struct ibv_context *(*open_device)(struct ibv_device *device);
+ int (*close_device)(struct ibv_context *context);
+ const char *(*get_device_name)(struct ibv_device *device);
+ int (*query_device)(struct ibv_context *context,
+ struct ibv_device_attr *device_attr);
+ int (*query_device_ex)(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr);
+ int (*query_port)(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr);
+ const char *(*port_state_str)(enum ibv_port_state port_state);
+ struct ibv_comp_channel *(*create_comp_channel)
+ (struct ibv_context *context);
+ int (*destroy_comp_channel)(struct ibv_comp_channel *channel);
+ struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
+ void *cq_context,
+ struct ibv_comp_channel *channel,
+ int comp_vector);
+ int (*destroy_cq)(struct ibv_cq *cq);
+ int (*get_cq_event)(struct ibv_comp_channel *channel,
+ struct ibv_cq **cq, void **cq_context);
+ void (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);
+ struct ibv_flow *(*create_flow)(struct ibv_qp *qp,
+ struct ibv_flow_attr *flow);
+ int (*destroy_flow)(struct ibv_flow *flow_id);
+ struct ibv_qp *(*create_qp)(struct ibv_pd *pd,
+ struct ibv_qp_init_attr *qp_init_attr);
+ struct ibv_qp *(*create_qp_ex)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex);
+ int (*destroy_qp)(struct ibv_qp *qp);
+ int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask);
+ struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,
+ size_t length, int access);
+ int (*dereg_mr)(struct ibv_mr *mr);
+ struct ibv_rwq_ind_table *(*create_rwq_ind_table)
+ (struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr);
+ int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
+ struct ibv_wq *(*create_wq)(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr);
+ int (*destroy_wq)(struct ibv_wq *wq);
+ int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
+ int (*dv_init_obj)(struct mlx4dv_obj *obj, uint64_t obj_type);
+ int (*dv_set_context_attr)(struct ibv_context *context,
+ enum mlx4dv_set_ctx_attr_type attr_type,
+ void *attr);
+};
+
+const struct mlx4_glue *mlx4_glue;
+
+#endif /* MLX4_GLUE_H_ */
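Because the structure is only ever reached through a pointer resolved at run time, its layout is the whole contract between PMD and glue object, and the version string is the guard. A minimal restatement of the check performed in rte_mlx4_pmd_init() (assumes mlx4_glue.h and <string.h>):

/* Non-zero when the glue object matches the PMD build. */
static int
glue_version_ok(const struct mlx4_glue *glue)
{
	return strcmp(glue->version, MLX4_GLUE_VERSION) == 0;
}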
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index 50d19769..2141992e 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
/**
@@ -52,11 +24,12 @@
#include <rte_alarm.h>
#include <rte_errno.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_io.h>
#include <rte_interrupts.h>
#include "mlx4.h"
+#include "mlx4_glue.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
@@ -154,7 +127,7 @@ mlx4_link_status_alarm(struct priv *priv)
if (intr_conf->lsc && !mlx4_link_status_check(priv))
_rte_eth_dev_callback_process(priv->dev,
RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
/**
@@ -216,7 +189,7 @@ mlx4_interrupt_handler(struct priv *priv)
unsigned int i;
/* Read all message and acknowledge them. */
- while (!ibv_get_async_event(priv->ctx, &event)) {
+ while (!mlx4_glue->get_async_event(priv->ctx, &event)) {
switch (event.event_type) {
case IBV_EVENT_PORT_ACTIVE:
case IBV_EVENT_PORT_ERR:
@@ -231,12 +204,12 @@ mlx4_interrupt_handler(struct priv *priv)
DEBUG("event type %d on physical port %d not handled",
event.event_type, event.element.port_num);
}
- ibv_ack_async_event(&event);
+ mlx4_glue->ack_async_event(&event);
}
for (i = 0; i != RTE_DIM(caught); ++i)
if (caught[i])
_rte_eth_dev_callback_process(priv->dev, type[i],
- NULL, NULL);
+ NULL);
}
/**
@@ -291,7 +264,7 @@ mlx4_intr_uninstall(struct priv *priv)
}
rte_eal_alarm_cancel((void (*)(void *))mlx4_link_status_alarm, priv);
priv->intr_alarm = 0;
- mlx4_rx_intr_vec_disable(priv);
+ mlx4_rxq_intr_disable(priv);
rte_errno = err;
return 0;
}
@@ -313,8 +286,6 @@ mlx4_intr_install(struct priv *priv)
int rc;
mlx4_intr_uninstall(priv);
- if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
- goto error;
if (intr_conf->lsc | intr_conf->rmv) {
priv->intr_handle.fd = priv->ctx->async_fd;
rc = rte_intr_callback_register(&priv->intr_handle,
@@ -354,7 +325,8 @@ mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
if (!rxq || !rxq->channel) {
ret = EINVAL;
} else {
- ret = ibv_get_cq_event(rxq->cq->channel, &ev_cq, &ev_ctx);
+ ret = mlx4_glue->get_cq_event(rxq->cq->channel, &ev_cq,
+ &ev_ctx);
if (ret || ev_cq != rxq->cq)
ret = EINVAL;
}
@@ -364,7 +336,7 @@ mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
idx);
} else {
rxq->mcq.arm_sn++;
- ibv_ack_cq_events(rxq->cq, 1);
+ mlx4_glue->ack_cq_events(rxq->cq, 1);
}
return -ret;
}
@@ -395,3 +367,40 @@ mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
}
return -ret;
}
+
+/**
+ * Enable datapath interrupts.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_rxq_intr_enable(struct priv *priv)
+{
+ const struct rte_intr_conf *const intr_conf =
+ &priv->dev->data->dev_conf.intr_conf;
+
+ if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
+ goto error;
+ return 0;
+error:
+ return -rte_errno;
+}
+
+/**
+ * Disable datapath interrupts, keeping other interrupts intact.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+mlx4_rxq_intr_disable(struct priv *priv)
+{
+ int err = rte_errno; /* Make sure rte_errno remains unchanged. */
+
+ mlx4_rx_intr_vec_disable(priv);
+ rte_errno = err;
+}
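
The new mlx4_rxq_intr_disable() above deliberately saves and restores rte_errno around its cleanup call so that an earlier error code survives teardown. A minimal standalone sketch of that pattern, with a hypothetical cleanup callback:

#include <rte_errno.h>

/* Run a cleanup step without clobbering the caller's rte_errno. */
static void
cleanup_preserving_errno(void (*cleanup)(void))
{
	int err = rte_errno; /* Save the current error code. */

	cleanup();           /* May overwrite rte_errno internally. */
	rte_errno = err;     /* Restore it for the caller. */
}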
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index 2a3e2695..9a1e4de3 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
/**
@@ -60,6 +32,7 @@
#include <rte_mempool.h>
#include <rte_spinlock.h>
+#include "mlx4_glue.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
@@ -200,8 +173,8 @@ mlx4_mr_get(struct priv *priv, struct rte_mempool *mp)
.end = end,
.refcnt = 1,
.priv = priv,
- .mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
- IBV_ACCESS_LOCAL_WRITE),
+ .mr = mlx4_glue->reg_mr(priv->pd, (void *)start, end - start,
+ IBV_ACCESS_LOCAL_WRITE),
.mp = mp,
};
if (mr->mr) {
@@ -240,7 +213,7 @@ mlx4_mr_put(struct mlx4_mr *mr)
if (--mr->refcnt)
goto release;
LIST_REMOVE(mr, next);
- claim_zero(ibv_dereg_mr(mr->mr));
+ claim_zero(mlx4_glue->dereg_mr(mr->mr));
rte_free(mr);
release:
rte_spinlock_unlock(&priv->mr_lock);
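
Throughout this patch, direct ibv_*() and mlx4dv_*() calls become mlx4_glue->*() calls. Judging from the new mlx4_glue.h include, the glue object is a table of function pointers mirroring the verbs API, which lets rdma-core be resolved at run time instead of being a hard link-time dependency. A reduced, illustrative sketch covering only the two calls used in this file (struct and variable names are assumptions, not the PMD's actual definitions):

#include <stddef.h>
#include <infiniband/verbs.h>

/* Illustrative subset of a verbs indirection ("glue") table. */
struct glue_sketch {
	struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,
				 size_t length, int access);
	int (*dereg_mr)(struct ibv_mr *mr);
};

/* Statically-linked variant: entries point straight at libibverbs. */
static const struct glue_sketch glue_sketch = {
	.reg_mr = ibv_reg_mr,
	.dereg_mr = ibv_dereg_mr,
};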
diff --git a/drivers/net/mlx4/mlx4_prm.h b/drivers/net/mlx4/mlx4_prm.h
index fcc7c129..153dda52 100644
--- a/drivers/net/mlx4/mlx4_prm.h
+++ b/drivers/net/mlx4/mlx4_prm.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
#ifndef MLX4_PRM_H_
@@ -53,23 +25,22 @@
#define MLX4_TXBB_SIZE (1 << MLX4_TXBB_SHIFT)
/* Typical TSO descriptor with 16 gather entries is 352 bytes. */
-#define MLX4_MAX_WQE_SIZE 512
-#define MLX4_MAX_WQE_TXBBS (MLX4_MAX_WQE_SIZE / MLX4_TXBB_SIZE)
+#define MLX4_MAX_SGE 32
+#define MLX4_MAX_WQE_SIZE \
+ (MLX4_MAX_SGE * sizeof(struct mlx4_wqe_data_seg) + \
+ sizeof(struct mlx4_wqe_ctrl_seg))
+#define MLX4_SEG_SHIFT 4
/* Send queue stamping/invalidating information. */
#define MLX4_SQ_STAMP_STRIDE 64
#define MLX4_SQ_STAMP_DWORDS (MLX4_SQ_STAMP_STRIDE / 4)
-#define MLX4_SQ_STAMP_SHIFT 31
+#define MLX4_SQ_OWNER_BIT 31
#define MLX4_SQ_STAMP_VAL 0x7fffffff
/* Work queue element (WQE) flags. */
-#define MLX4_BIT_WQE_OWN 0x80000000
#define MLX4_WQE_CTRL_IIP_HDR_CSUM (1 << 28)
#define MLX4_WQE_CTRL_IL4_HDR_CSUM (1 << 27)
-#define MLX4_SIZE_TO_TXBBS(size) \
- (RTE_ALIGN((size), (MLX4_TXBB_SIZE)) >> (MLX4_TXBB_SHIFT))
-
/* CQE checksum flags. */
enum {
MLX4_CQE_L2_TUNNEL_IPV4 = (int)(1u << 25),
@@ -98,17 +69,15 @@ enum {
struct mlx4_sq {
volatile uint8_t *buf; /**< SQ buffer. */
volatile uint8_t *eob; /**< End of SQ buffer */
- uint32_t head; /**< SQ head counter in units of TXBBS. */
- uint32_t tail; /**< SQ tail counter in units of TXBBS. */
- uint32_t txbb_cnt; /**< Num of WQEBB in the Q (should be ^2). */
- uint32_t txbb_cnt_mask; /**< txbbs_cnt mask (txbb_cnt is ^2). */
- uint32_t headroom_txbbs; /**< Num of txbbs that should be kept free. */
+ uint32_t size; /**< SQ size including headroom. */
+ uint32_t remain_size; /**< Remaining WQE room in SQ (bytes). */
+ uint32_t owner_opcode;
+ /**< Default owner opcode with HW valid owner bit. */
+ uint32_t stamp; /**< Stamp value with an invalid HW owner bit. */
volatile uint32_t *db; /**< Pointer to the doorbell. */
uint32_t doorbell_qpn; /**< qp number to write to the doorbell. */
};
-#define mlx4_get_send_wqe(sq, n) ((sq)->buf + ((n) * (MLX4_TXBB_SIZE)))
-
/* Completion queue events, numbers and masks. */
#define MLX4_CQ_DB_GEQ_N_MASK 0x3
#define MLX4_CQ_DOORBELL 0x20
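
MLX4_MAX_WQE_SIZE is now derived from segment sizes instead of a hard-coded 512 bytes. Assuming the usual 16-byte mlx4 control and data segments (an assumption; the struct definitions live in the mlx4dv headers), the arithmetic works out as follows:

/*
 * sizeof(struct mlx4_wqe_ctrl_seg) == 16 and
 * sizeof(struct mlx4_wqe_data_seg) == 16, hence:
 *
 *   MLX4_MAX_WQE_SIZE = 32 * 16 + 16 = 528 bytes.
 *
 * fence_size counts 16-byte units (MLX4_SEG_SHIFT == 4), so a WQE with
 * one control segment and nb_segs data segments spans
 * (1 + nb_segs) << MLX4_SEG_SHIFT bytes before 64-byte TXBB alignment.
 */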
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 53313c56..7a036ed8 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
/**
@@ -55,13 +27,14 @@
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_errno.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include "mlx4.h"
+#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
@@ -231,7 +204,7 @@ mlx4_rss_attach(struct mlx4_rss *rss)
}
ind_tbl[i] = rxq->wq;
}
- rss->ind = ibv_create_rwq_ind_table
+ rss->ind = mlx4_glue->create_rwq_ind_table
(priv->ctx,
&(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
@@ -243,7 +216,7 @@ mlx4_rss_attach(struct mlx4_rss *rss)
msg = "RSS indirection table creation failure";
goto error;
}
- rss->qp = ibv_create_qp_ex
+ rss->qp = mlx4_glue->create_qp_ex
(priv->ctx,
&(struct ibv_qp_init_attr_ex){
.comp_mask = (IBV_QP_INIT_ATTR_PD |
@@ -264,7 +237,7 @@ mlx4_rss_attach(struct mlx4_rss *rss)
msg = "RSS hash QP creation failure";
goto error;
}
- ret = ibv_modify_qp
+ ret = mlx4_glue->modify_qp
(rss->qp,
&(struct ibv_qp_attr){
.qp_state = IBV_QPS_INIT,
@@ -275,7 +248,7 @@ mlx4_rss_attach(struct mlx4_rss *rss)
msg = "failed to switch RSS hash QP to INIT state";
goto error;
}
- ret = ibv_modify_qp
+ ret = mlx4_glue->modify_qp
(rss->qp,
&(struct ibv_qp_attr){
.qp_state = IBV_QPS_RTR,
@@ -288,11 +261,11 @@ mlx4_rss_attach(struct mlx4_rss *rss)
return 0;
error:
if (rss->qp) {
- claim_zero(ibv_destroy_qp(rss->qp));
+ claim_zero(mlx4_glue->destroy_qp(rss->qp));
rss->qp = NULL;
}
if (rss->ind) {
- claim_zero(ibv_destroy_rwq_ind_table(rss->ind));
+ claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
rss->ind = NULL;
}
while (i--)
@@ -325,9 +298,9 @@ mlx4_rss_detach(struct mlx4_rss *rss)
assert(rss->ind);
if (--rss->usecnt)
return;
- claim_zero(ibv_destroy_qp(rss->qp));
+ claim_zero(mlx4_glue->destroy_qp(rss->qp));
rss->qp = NULL;
- claim_zero(ibv_destroy_rwq_ind_table(rss->ind));
+ claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
rss->ind = NULL;
for (i = 0; i != rss->queues; ++i)
mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
@@ -364,9 +337,10 @@ mlx4_rss_init(struct priv *priv)
int ret;
/* Prepare range for RSS contexts before creating the first WQ. */
- ret = mlx4dv_set_context_attr(priv->ctx,
- MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
- &log2_range);
+ ret = mlx4_glue->dv_set_context_attr
+ (priv->ctx,
+ MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
+ &log2_range);
if (ret) {
ERROR("cannot set up range size for RSS context to %u"
" (for %u Rx queues), error: %s",
@@ -402,13 +376,13 @@ mlx4_rss_init(struct priv *priv)
* sequentially and are guaranteed to never be reused in the
* same context by the underlying implementation.
*/
- cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
+ cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!cq) {
ret = ENOMEM;
msg = "placeholder CQ creation failure";
goto error;
}
- wq = ibv_create_wq
+ wq = mlx4_glue->create_wq
(priv->ctx,
&(struct ibv_wq_init_attr){
.wq_type = IBV_WQT_RQ,
@@ -419,11 +393,11 @@ mlx4_rss_init(struct priv *priv)
});
if (wq) {
wq_num = wq->wq_num;
- claim_zero(ibv_destroy_wq(wq));
+ claim_zero(mlx4_glue->destroy_wq(wq));
} else {
wq_num = 0; /* Shut up GCC 4.8 warnings. */
}
- claim_zero(ibv_destroy_cq(cq));
+ claim_zero(mlx4_glue->destroy_cq(cq));
if (!wq) {
ret = ENOMEM;
msg = "placeholder WQ creation failure";
@@ -522,13 +496,14 @@ mlx4_rxq_attach(struct rxq *rxq)
int ret;
assert(rte_is_power_of_2(elts_n));
- cq = ibv_create_cq(priv->ctx, elts_n / sges_n, NULL, rxq->channel, 0);
+ cq = mlx4_glue->create_cq(priv->ctx, elts_n / sges_n, NULL,
+ rxq->channel, 0);
if (!cq) {
ret = ENOMEM;
msg = "CQ creation failure";
goto error;
}
- wq = ibv_create_wq
+ wq = mlx4_glue->create_wq
(priv->ctx,
&(struct ibv_wq_init_attr){
.wq_type = IBV_WQT_RQ,
@@ -542,7 +517,7 @@ mlx4_rxq_attach(struct rxq *rxq)
msg = "WQ creation failure";
goto error;
}
- ret = ibv_modify_wq
+ ret = mlx4_glue->modify_wq
(wq,
&(struct ibv_wq_attr){
.attr_mask = IBV_WQ_ATTR_STATE,
@@ -557,7 +532,7 @@ mlx4_rxq_attach(struct rxq *rxq)
mlxdv.cq.out = &dv_cq;
mlxdv.rwq.in = wq;
mlxdv.rwq.out = &dv_rwq;
- ret = mlx4dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ);
+ ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ);
if (ret) {
msg = "failed to obtain device information from WQ/CQ objects";
goto error;
@@ -619,9 +594,9 @@ mlx4_rxq_attach(struct rxq *rxq)
return 0;
error:
if (wq)
- claim_zero(ibv_destroy_wq(wq));
+ claim_zero(mlx4_glue->destroy_wq(wq));
if (cq)
- claim_zero(ibv_destroy_cq(cq));
+ claim_zero(mlx4_glue->destroy_cq(cq));
rte_errno = ret;
ERROR("error while attaching Rx queue %p: %s: %s",
(void *)rxq, msg, strerror(ret));
@@ -649,9 +624,9 @@ mlx4_rxq_detach(struct rxq *rxq)
memset(&rxq->mcq, 0, sizeof(rxq->mcq));
rxq->rq_db = NULL;
rxq->wqes = NULL;
- claim_zero(ibv_destroy_wq(rxq->wq));
+ claim_zero(mlx4_glue->destroy_wq(rxq->wq));
rxq->wq = NULL;
- claim_zero(ibv_destroy_cq(rxq->cq));
+ claim_zero(mlx4_glue->destroy_cq(rxq->cq));
rxq->cq = NULL;
DEBUG("%p: freeing Rx queue elements", (void *)rxq);
for (i = 0; (i != RTE_DIM(*elts)); ++i) {
@@ -663,6 +638,64 @@ mlx4_rxq_detach(struct rxq *rxq)
}
/**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx4_get_rx_queue_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_CRC_STRIP;
+
+ if (priv->hw_csum)
+ offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ return offloads;
+}
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx4_get_rx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ (void)priv;
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param requested
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * Nonzero when configuration is valid.
+ */
+static int
+mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
+{
+ uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported = mlx4_get_rx_port_offloads(priv);
+
+ return !((mandatory ^ requested) & supported);
+}
+
+/**
* DPDK callback to configure a Rx queue.
*
* @param dev
@@ -707,6 +740,16 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
+ if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
+ rte_errno = ENOTSUP;
+ ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ (mlx4_get_rx_port_offloads(priv) |
+ mlx4_get_rx_queue_offloads(priv)));
+ return -rte_errno;
+ }
if (idx >= dev->data->nb_rx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -746,10 +789,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.elts_n = rte_log2_u32(desc),
.elts = elts,
/* Toggle Rx checksum offload if hardware supports it. */
- .csum = (priv->hw_csum &&
- dev->data->dev_conf.rxmode.hw_ip_checksum),
- .csum_l2tun = (priv->hw_csum_l2tun &&
- dev->data->dev_conf.rxmode.hw_ip_checksum),
+ .csum = priv->hw_csum &&
+ (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ .csum_l2tun = priv->hw_csum_l2tun &&
+ (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
.l2tun_offload = priv->hw_csum_l2tun,
.stats = {
.idx = idx,
@@ -761,7 +804,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
;
- } else if (dev->data->dev_conf.rxmode.enable_scatter) {
+ } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -812,7 +855,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
goto error;
}
if (dev->data->dev_conf.intr_conf.rxq) {
- rxq->channel = ibv_create_comp_channel(priv->ctx);
+ rxq->channel = mlx4_glue->create_comp_channel(priv->ctx);
if (rxq->channel == NULL) {
rte_errno = ENOMEM;
ERROR("%p: Rx interrupt completion channel creation"
@@ -867,7 +910,7 @@ mlx4_rx_queue_release(void *dpdk_rxq)
assert(!rxq->wqes);
assert(!rxq->rq_db);
if (rxq->channel)
- claim_zero(ibv_destroy_comp_channel(rxq->channel));
+ claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel));
if (rxq->mr)
mlx4_mr_put(rxq->mr);
rte_free(rxq);
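
mlx4_check_rx_queue_offloads(), added earlier in this file, accepts a per-queue configuration when it agrees with the port-wide configuration on every port-level flag the PMD recognizes (per-queue-only flags may differ freely): !((mandatory ^ requested) & supported). A small self-checking model of that predicate (flag values illustrative):

#include <assert.h>
#include <stdint.h>

static int
offloads_match(uint64_t mandatory, uint64_t requested, uint64_t supported)
{
	/* Disagreements are tolerated only on unsupported bits. */
	return !((mandatory ^ requested) & supported);
}

int
main(void)
{
	const uint64_t SCATTER = UINT64_C(1) << 0; /* illustrative values */
	const uint64_t VLAN = UINT64_C(1) << 1;
	const uint64_t supported = SCATTER | VLAN;

	assert(offloads_match(VLAN, VLAN, supported));           /* match */
	assert(!offloads_match(VLAN, 0, supported));      /* flag dropped */
	assert(offloads_match(0, UINT64_C(1) << 7, supported)); /* unknown */
	return 0;
}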
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index 2bfa8b1b..8ca8b77c 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
/**
@@ -61,9 +33,6 @@
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
-#define WQE_ONE_DATA_SEG_SIZE \
- (sizeof(struct mlx4_wqe_ctrl_seg) + sizeof(struct mlx4_wqe_data_seg))
-
/**
* Pointer-value pair structure used in tx_post_send for saving the first
 * DWORD (32 bits) of a TXBB.
@@ -88,7 +57,8 @@ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
* giving a total of up to 256 entries.
*/
[0x00] = RTE_PTYPE_L2_ETHER,
- [0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ [0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_FRAG,
[0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
@@ -261,59 +231,48 @@ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
};
/**
- * Stamp a WQE so it won't be reused by the HW.
+ * Stamp a burst of TXBBs so they won't be reused by the HW.
 *
 * Routine is used when freeing WQEs in use by the chip or when a WQE
 * build has failed, leaving partial information on the queue.
*
* @param sq
* Pointer to the SQ structure.
- * @param index
- * Index of the freed WQE.
- * @param num_txbbs
- * Number of blocks to stamp.
- * If < 0 the routine will use the size written in the WQ entry.
- * @param owner
- * The value of the WQE owner bit to use in the stamp.
+ * @param start
+ * Pointer to the first TXBB to stamp.
+ * @param end
+ * Pointer one past the last TXBB to stamp (exclusive end).
*
* @return
- * The number of Tx basic blocs (TXBB) the WQE contained.
+ * Size of the stamped burst in bytes.
*/
-static int
-mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, uint16_t index, uint8_t owner)
+static uint32_t
+mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, volatile uint32_t *start,
+ volatile uint32_t *end)
{
- uint32_t stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
- (!!owner << MLX4_SQ_STAMP_SHIFT));
- volatile uint8_t *wqe = mlx4_get_send_wqe(sq,
- (index & sq->txbb_cnt_mask));
- volatile uint32_t *ptr = (volatile uint32_t *)wqe;
- int i;
- int txbbs_size;
- int num_txbbs;
+ uint32_t stamp = sq->stamp;
+ int32_t size = (intptr_t)end - (intptr_t)start;
- /* Extract the size from the control segment of the WQE. */
- num_txbbs = MLX4_SIZE_TO_TXBBS((((volatile struct mlx4_wqe_ctrl_seg *)
- wqe)->fence_size & 0x3f) << 4);
- txbbs_size = num_txbbs * MLX4_TXBB_SIZE;
- /* Optimize the common case when there is no wrap-around. */
- if (wqe + txbbs_size <= sq->eob) {
- /* Stamp the freed descriptor. */
- for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
- *ptr = stamp;
- ptr += MLX4_SQ_STAMP_DWORDS;
- }
- } else {
- /* Stamp the freed descriptor. */
- for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
- *ptr = stamp;
- ptr += MLX4_SQ_STAMP_DWORDS;
- if ((volatile uint8_t *)ptr >= sq->eob) {
- ptr = (volatile uint32_t *)sq->buf;
- stamp ^= RTE_BE32(0x80000000);
- }
- }
+ assert(start != end);
+ /* Handle SQ ring wrap-around. */
+ if (size < 0) {
+ size = (int32_t)sq->size + size;
+ do {
+ *start = stamp;
+ start += MLX4_SQ_STAMP_DWORDS;
+ } while (start != (volatile uint32_t *)sq->eob);
+ start = (volatile uint32_t *)sq->buf;
+ /* Flip invalid stamping ownership. */
+ stamp ^= RTE_BE32(0x1 << MLX4_SQ_OWNER_BIT);
+ sq->stamp = stamp;
+ if (start == end)
+ return size;
}
- return num_txbbs;
+ do {
+ *start = stamp;
+ start += MLX4_SQ_STAMP_DWORDS;
+ } while (start != end);
+ return (uint32_t)size;
}
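
The rewritten routine stamps an exclusive [start, end) range of TXBB first-DWORDs and flips the stamp's ownership bit whenever it wraps past the end of the SQ buffer. A simplified standalone model of that loop (plain pointers instead of the SQ structure; assumes end lies strictly inside the ring):

#include <stdint.h>

#define STAMP_DWORDS 16 /* one 64-byte TXBB stride = 16 32-bit words */

static void
stamp_range(volatile uint32_t *buf, volatile uint32_t *eob,
	    volatile uint32_t *start, volatile uint32_t *end,
	    uint32_t *stamp)
{
	while (start != end) {
		*start = *stamp;             /* invalidate this TXBB */
		start += STAMP_DWORDS;
		if (start == eob) {
			start = buf;                 /* wrap to ring start */
			*stamp ^= UINT32_C(1) << 31; /* flip the owner bit */
		}
	}
}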
/**
@@ -326,23 +285,21 @@ mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, uint16_t index, uint8_t owner)
*
* @param txq
* Pointer to Tx queue structure.
- *
- * @return
- * 0 on success, -1 on failure.
+ * @param elts_m
+ * Tx elements number mask.
+ * @param sq
+ * Pointer to the SQ structure.
*/
-static int
-mlx4_txq_complete(struct txq *txq, const unsigned int elts_n,
- struct mlx4_sq *sq)
+static void
+mlx4_txq_complete(struct txq *txq, const unsigned int elts_m,
+ struct mlx4_sq *sq)
{
- unsigned int elts_comp = txq->elts_comp;
unsigned int elts_tail = txq->elts_tail;
- unsigned int sq_tail = sq->tail;
struct mlx4_cq *cq = &txq->mcq;
volatile struct mlx4_cqe *cqe;
+ uint32_t completed;
uint32_t cons_index = cq->cons_index;
- uint16_t new_index;
- uint16_t nr_txbbs = 0;
- int pkts = 0;
+ volatile uint32_t *first_txbb;
/*
* Traverse over all CQ entries reported and handle each WQ entry
@@ -353,11 +310,11 @@ mlx4_txq_complete(struct txq *txq, const unsigned int elts_n,
if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
!!(cons_index & cq->cqe_cnt)))
break;
+#ifndef NDEBUG
/*
* Make sure we read the CQE after we read the ownership bit.
*/
rte_io_rmb();
-#ifndef NDEBUG
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
volatile struct mlx4_err_cqe *cqe_err =
@@ -366,41 +323,24 @@ mlx4_txq_complete(struct txq *txq, const unsigned int elts_n,
" syndrome: 0x%x\n",
(void *)txq, cqe_err->vendor_err,
cqe_err->syndrome);
+ break;
}
#endif /* NDEBUG */
- /* Get WQE index reported in the CQE. */
- new_index =
- rte_be_to_cpu_16(cqe->wqe_index) & sq->txbb_cnt_mask;
- do {
- /* Free next descriptor. */
- sq_tail += nr_txbbs;
- nr_txbbs =
- mlx4_txq_stamp_freed_wqe(sq,
- sq_tail & sq->txbb_cnt_mask,
- !!(sq_tail & sq->txbb_cnt));
- pkts++;
- } while ((sq_tail & sq->txbb_cnt_mask) != new_index);
cons_index++;
} while (1);
- if (unlikely(pkts == 0))
- return 0;
- /* Update CQ. */
+ completed = (cons_index - cq->cons_index) * txq->elts_comp_cd_init;
+ if (unlikely(!completed))
+ return;
+ /* The first address to stamp is where the previous burst ended. */
+ first_txbb = (&(*txq->elts)[elts_tail & elts_m])->eocb;
+ elts_tail += completed;
+ /* The new tail element holds the end address. */
+ sq->remain_size += mlx4_txq_stamp_freed_wqe(sq, first_txbb,
+ (&(*txq->elts)[elts_tail & elts_m])->eocb);
+ /* Update CQ consumer index. */
cq->cons_index = cons_index;
- *cq->set_ci_db = rte_cpu_to_be_32(cq->cons_index & MLX4_CQ_DB_CI_MASK);
- sq->tail = sq_tail + nr_txbbs;
- /* Update the list of packets posted for transmission. */
- elts_comp -= pkts;
- assert(elts_comp <= txq->elts_comp);
- /*
- * Assume completion status is successful as nothing can be done about
- * it anyway.
- */
- elts_tail += pkts;
- if (elts_tail >= elts_n)
- elts_tail -= elts_n;
+ *cq->set_ci_db = rte_cpu_to_be_32(cons_index & MLX4_CQ_DB_CI_MASK);
txq->elts_tail = elts_tail;
- txq->elts_comp = elts_comp;
- return 0;
}
/**
@@ -421,110 +361,166 @@ mlx4_txq_mb2mp(struct rte_mbuf *buf)
return buf->pool;
}
-static int
+/**
+ * Write Tx data segment to the SQ.
+ *
+ * @param dseg
+ * Pointer to data segment in SQ.
+ * @param lkey
+ * Memory region lkey.
+ * @param addr
+ * Data address.
+ * @param byte_count
+ * Big endian bytes count of the data to send.
+ */
+static inline void
+mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg,
+ uint32_t lkey, uintptr_t addr, rte_be32_t byte_count)
+{
+ dseg->addr = rte_cpu_to_be_64(addr);
+ dseg->lkey = rte_cpu_to_be_32(lkey);
+#if RTE_CACHE_LINE_SIZE < 64
+ /*
+ * Need a barrier here before writing the byte_count
+ * fields to make sure that all the data is visible
+ * before the byte_count field is set.
+ * Otherwise, if the segment begins a new cacheline,
+ * the HCA prefetcher could grab the 64-byte chunk and
+ * get a valid (!= 0xffffffff) byte count but stale
+ * data, and end up sending the wrong data.
+ */
+ rte_io_wmb();
+#endif /* RTE_CACHE_LINE_SIZE */
+ dseg->byte_count = byte_count;
+}
+
+/**
+ * Write data segments of multi-segment packet.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param ctrl
+ * Pointer to the WQE control segment.
+ *
+ * @return
+ * Pointer to the next WQE control segment on success, NULL otherwise.
+ */
+static volatile struct mlx4_wqe_ctrl_seg *
mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
- volatile struct mlx4_wqe_ctrl_seg **pctrl)
+ volatile struct mlx4_wqe_ctrl_seg *ctrl)
{
- int wqe_real_size;
- int nr_txbbs;
struct pv *pv = (struct pv *)txq->bounce_buf;
struct mlx4_sq *sq = &txq->msq;
- uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
- volatile struct mlx4_wqe_ctrl_seg *ctrl;
- volatile struct mlx4_wqe_data_seg *dseg;
- struct rte_mbuf *sbuf;
+ struct rte_mbuf *sbuf = buf;
uint32_t lkey;
- uintptr_t addr;
- uint32_t byte_count;
int pv_counter = 0;
+ int nb_segs = buf->nb_segs;
+ uint32_t wqe_size;
+ volatile struct mlx4_wqe_data_seg *dseg =
+ (volatile struct mlx4_wqe_data_seg *)(ctrl + 1);
- /* Calculate the needed work queue entry size for this packet. */
- wqe_real_size = sizeof(volatile struct mlx4_wqe_ctrl_seg) +
- buf->nb_segs * sizeof(volatile struct mlx4_wqe_data_seg);
- nr_txbbs = MLX4_SIZE_TO_TXBBS(wqe_real_size);
+ ctrl->fence_size = 1 + nb_segs;
+ wqe_size = RTE_ALIGN((uint32_t)(ctrl->fence_size << MLX4_SEG_SHIFT),
+ MLX4_TXBB_SIZE);
+ /* Validate WQE size and WQE space in the send queue. */
+ if (sq->remain_size < wqe_size ||
+ wqe_size > MLX4_MAX_WQE_SIZE)
+ return NULL;
/*
- * Check that there is room for this WQE in the send queue and that
- * the WQE size is legal.
+ * Fill the data segments with buffer information.
+ * The head TXBB of a WQE always holds the control segment, so
+ * jump straight to the tail-TXBB code to fill the first WQE's
+ * data segments.
*/
- if (((sq->head - sq->tail) + nr_txbbs +
- sq->headroom_txbbs) >= sq->txbb_cnt ||
- nr_txbbs > MLX4_MAX_WQE_TXBBS) {
- return -1;
+ goto txbb_tail_segs;
+txbb_head_seg:
+ /* Memory region key (big endian) for this memory pool. */
+ lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ if (unlikely(lkey == (uint32_t)-1)) {
+ DEBUG("%p: unable to get MP <-> MR association",
+ (void *)txq);
+ return NULL;
}
- /* Get the control and data entries of the WQE. */
- ctrl = (volatile struct mlx4_wqe_ctrl_seg *)
- mlx4_get_send_wqe(sq, head_idx);
- dseg = (volatile struct mlx4_wqe_data_seg *)
- ((uintptr_t)ctrl + sizeof(struct mlx4_wqe_ctrl_seg));
- *pctrl = ctrl;
- /* Fill the data segments with buffer information. */
- for (sbuf = buf; sbuf != NULL; sbuf = sbuf->next, dseg++) {
- addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
- rte_prefetch0((volatile void *)addr);
- /* Handle WQE wraparound. */
- if (dseg >= (volatile struct mlx4_wqe_data_seg *)sq->eob)
- dseg = (volatile struct mlx4_wqe_data_seg *)sq->buf;
- dseg->addr = rte_cpu_to_be_64(addr);
- /* Memory region key (big endian) for this memory pool. */
+ /* Handle WQE wraparound. */
+ if (dseg >=
+ (volatile struct mlx4_wqe_data_seg *)sq->eob)
+ dseg = (volatile struct mlx4_wqe_data_seg *)
+ sq->buf;
+ dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(sbuf, uintptr_t));
+ dseg->lkey = rte_cpu_to_be_32(lkey);
+ /*
+ * This data segment starts at the beginning of a new
+ * TXBB, so we need to postpone its byte_count writing
+ * for later.
+ */
+ pv[pv_counter].dseg = dseg;
+ /*
+ * Zero length segment is treated as inline segment
+ * with zero data.
+ */
+ pv[pv_counter++].val = rte_cpu_to_be_32(sbuf->data_len ?
+ sbuf->data_len : 0x80000000);
+ sbuf = sbuf->next;
+ dseg++;
+ nb_segs--;
+txbb_tail_segs:
+ /* Jump to default if there are more than two segments remaining. */
+ switch (nb_segs) {
+ default:
lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
- dseg->lkey = rte_cpu_to_be_32(lkey);
-#ifndef NDEBUG
- /* Calculate the needed work queue entry size for this packet */
- if (unlikely(dseg->lkey == rte_cpu_to_be_32((uint32_t)-1))) {
- /* MR does not exist. */
+ if (unlikely(lkey == (uint32_t)-1)) {
DEBUG("%p: unable to get MP <-> MR association",
- (void *)txq);
- /*
- * Restamp entry in case of failure.
- * Make sure that size is written correctly
- * Note that we give ownership to the SW, not the HW.
- */
- wqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +
- buf->nb_segs * sizeof(struct mlx4_wqe_data_seg);
- ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
- mlx4_txq_stamp_freed_wqe(sq, head_idx,
- (sq->head & sq->txbb_cnt) ? 0 : 1);
- return -1;
+ (void *)txq);
+ return NULL;
}
-#endif /* NDEBUG */
- if (likely(sbuf->data_len)) {
- byte_count = rte_cpu_to_be_32(sbuf->data_len);
- } else {
- /*
- * Zero length segment is treated as inline segment
- * with zero data.
- */
- byte_count = RTE_BE32(0x80000000);
+ mlx4_fill_tx_data_seg(dseg, lkey,
+ rte_pktmbuf_mtod(sbuf, uintptr_t),
+ rte_cpu_to_be_32(sbuf->data_len ?
+ sbuf->data_len :
+ 0x80000000));
+ sbuf = sbuf->next;
+ dseg++;
+ nb_segs--;
+ /* fallthrough */
+ case 2:
+ lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ if (unlikely(lkey == (uint32_t)-1)) {
+ DEBUG("%p: unable to get MP <-> MR association",
+ (void *)txq);
+ return NULL;
}
- /*
- * If the data segment is not at the beginning of a
- * Tx basic block (TXBB) then write the byte count,
- * else postpone the writing to just before updating the
- * control segment.
- */
- if ((uintptr_t)dseg & (uintptr_t)(MLX4_TXBB_SIZE - 1)) {
-#if RTE_CACHE_LINE_SIZE < 64
- /*
- * Need a barrier here before writing the byte_count
- * fields to make sure that all the data is visible
- * before the byte_count field is set.
- * Otherwise, if the segment begins a new cacheline,
- * the HCA prefetcher could grab the 64-byte chunk and
- * get a valid (!= 0xffffffff) byte count but stale
- * data, and end up sending the wrong data.
- */
- rte_io_wmb();
-#endif /* RTE_CACHE_LINE_SIZE */
- dseg->byte_count = byte_count;
- } else {
- /*
- * This data segment starts at the beginning of a new
- * TXBB, so we need to postpone its byte_count writing
- * for later.
- */
- pv[pv_counter].dseg = dseg;
- pv[pv_counter++].val = byte_count;
+ mlx4_fill_tx_data_seg(dseg, lkey,
+ rte_pktmbuf_mtod(sbuf, uintptr_t),
+ rte_cpu_to_be_32(sbuf->data_len ?
+ sbuf->data_len :
+ 0x80000000));
+ sbuf = sbuf->next;
+ dseg++;
+ nb_segs--;
+ /* fallthrough */
+ case 1:
+ lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ if (unlikely(lkey == (uint32_t)-1)) {
+ DEBUG("%p: unable to get MP <-> MR association",
+ (void *)txq);
+ return NULL;
}
+ mlx4_fill_tx_data_seg(dseg, lkey,
+ rte_pktmbuf_mtod(sbuf, uintptr_t),
+ rte_cpu_to_be_32(sbuf->data_len ?
+ sbuf->data_len :
+ 0x80000000));
+ nb_segs--;
+ if (nb_segs) {
+ sbuf = sbuf->next;
+ dseg++;
+ goto txbb_head_seg;
+ }
+ /* fallthrough */
+ case 0:
+ break;
}
 /* Write the first DWORD of each TXBB saved earlier. */
if (pv_counter) {
@@ -533,9 +529,10 @@ mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
for (--pv_counter; pv_counter >= 0; pv_counter--)
pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
}
- /* Fill the control parameters for this packet. */
- ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
- return nr_txbbs;
+ sq->remain_size -= wqe_size;
+ /* Align next WQE address to the next TXBB. */
+ return (volatile struct mlx4_wqe_ctrl_seg *)
+ ((volatile uint8_t *)ctrl + wqe_size);
}
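
mlx4_tx_burst_segs() postpones the byte_count of any data segment that opens a new TXBB: writing that first DWORD early could let the HCA prefetcher pick up a TXBB whose remaining bytes are not yet visible. The pv array records the deferred writes, which are replayed in reverse just before ownership is handed over. A compact model of that bookkeeping (struct and function names hypothetical, mirroring struct pv in this file):

#include <stdint.h>

struct pv_sketch {
	volatile uint32_t *first_dword; /* start of a TXBB */
	uint32_t val;                   /* big-endian byte count */
};

/* Replay postponed first-DWORD writes, last TXBB first. */
static void
flush_postponed(struct pv_sketch *pv, int counter)
{
	while (--counter >= 0)
		*pv[counter].first_dword = pv[counter].val;
}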
/**
@@ -557,40 +554,39 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
struct txq *txq = (struct txq *)dpdk_txq;
unsigned int elts_head = txq->elts_head;
const unsigned int elts_n = txq->elts_n;
+ const unsigned int elts_m = elts_n - 1;
unsigned int bytes_sent = 0;
unsigned int i;
- unsigned int max;
+ unsigned int max = elts_head - txq->elts_tail;
struct mlx4_sq *sq = &txq->msq;
- int nr_txbbs;
+ volatile struct mlx4_wqe_ctrl_seg *ctrl;
+ struct txq_elt *elt;
assert(txq->elts_comp_cd != 0);
- if (likely(txq->elts_comp != 0))
- mlx4_txq_complete(txq, elts_n, sq);
- max = (elts_n - (elts_head - txq->elts_tail));
- if (max > elts_n)
- max -= elts_n;
+ if (likely(max >= txq->elts_comp_cd_init))
+ mlx4_txq_complete(txq, elts_m, sq);
+ max = elts_n - max;
assert(max >= 1);
assert(max <= elts_n);
/* Always leave one free entry in the ring. */
--max;
if (max > pkts_n)
max = pkts_n;
+ elt = &(*txq->elts)[elts_head & elts_m];
+ /* First Tx burst element saves the next WQE control segment. */
+ ctrl = elt->wqe;
for (i = 0; (i != max); ++i) {
struct rte_mbuf *buf = pkts[i];
- unsigned int elts_head_next =
- (((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
- struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
- struct txq_elt *elt = &(*txq->elts)[elts_head];
- uint32_t owner_opcode = MLX4_OPCODE_SEND;
- volatile struct mlx4_wqe_ctrl_seg *ctrl;
- volatile struct mlx4_wqe_data_seg *dseg;
+ struct txq_elt *elt_next = &(*txq->elts)[++elts_head & elts_m];
+ uint32_t owner_opcode = sq->owner_opcode;
+ volatile struct mlx4_wqe_data_seg *dseg =
+ (volatile struct mlx4_wqe_data_seg *)(ctrl + 1);
+ volatile struct mlx4_wqe_ctrl_seg *ctrl_next;
union {
uint32_t flags;
uint16_t flags16[2];
} srcrb;
- uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
uint32_t lkey;
- uintptr_t addr;
/* Clean up old buffer. */
if (likely(elt->buf != NULL)) {
@@ -598,7 +594,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
#ifndef NDEBUG
/* Poisoning. */
- memset(elt, 0x66, sizeof(*elt));
+ memset(&elt->buf, 0x66, sizeof(struct rte_mbuf *));
#endif
/* Faster than rte_pktmbuf_free(). */
do {
@@ -610,70 +606,48 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
if (buf->nb_segs == 1) {
- /*
- * Check that there is room for this WQE in the send
- * queue and that the WQE size is legal
- */
- if (((sq->head - sq->tail) + 1 + sq->headroom_txbbs) >=
- sq->txbb_cnt || 1 > MLX4_MAX_WQE_TXBBS) {
+ /* Validate WQE space in the send queue. */
+ if (sq->remain_size < MLX4_TXBB_SIZE) {
elt->buf = NULL;
break;
}
- /* Get the control and data entries of the WQE. */
- ctrl = (volatile struct mlx4_wqe_ctrl_seg *)
- mlx4_get_send_wqe(sq, head_idx);
- dseg = (volatile struct mlx4_wqe_data_seg *)
- ((uintptr_t)ctrl +
- sizeof(struct mlx4_wqe_ctrl_seg));
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- rte_prefetch0((volatile void *)addr);
- /* Handle WQE wraparound. */
- if (dseg >=
- (volatile struct mlx4_wqe_data_seg *)sq->eob)
- dseg = (volatile struct mlx4_wqe_data_seg *)
- sq->buf;
- dseg->addr = rte_cpu_to_be_64(addr);
- /* Memory region key (big endian). */
lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
- dseg->lkey = rte_cpu_to_be_32(lkey);
-#ifndef NDEBUG
- if (unlikely(dseg->lkey ==
- rte_cpu_to_be_32((uint32_t)-1))) {
+ if (unlikely(lkey == (uint32_t)-1)) {
/* MR does not exist. */
DEBUG("%p: unable to get MP <-> MR association",
(void *)txq);
- /*
- * Restamp entry in case of failure.
- * Make sure that size is written correctly
- * Note that we give ownership to the SW,
- * not the HW.
- */
- ctrl->fence_size =
- (WQE_ONE_DATA_SEG_SIZE >> 4) & 0x3f;
- mlx4_txq_stamp_freed_wqe(sq, head_idx,
- (sq->head & sq->txbb_cnt) ? 0 : 1);
elt->buf = NULL;
break;
}
-#endif /* NDEBUG */
- /* Never be TXBB aligned, no need compiler barrier. */
- dseg->byte_count = rte_cpu_to_be_32(buf->data_len);
- /* Fill the control parameters for this packet. */
- ctrl->fence_size = (WQE_ONE_DATA_SEG_SIZE >> 4) & 0x3f;
- nr_txbbs = 1;
+ mlx4_fill_tx_data_seg(dseg++, lkey,
+ rte_pktmbuf_mtod(buf, uintptr_t),
+ rte_cpu_to_be_32(buf->data_len));
+ /* Set WQE size in 16-byte units. */
+ ctrl->fence_size = 0x2;
+ sq->remain_size -= MLX4_TXBB_SIZE;
+ /* Align next WQE address to the next TXBB. */
+ ctrl_next = ctrl + 0x4;
} else {
- nr_txbbs = mlx4_tx_burst_segs(buf, txq, &ctrl);
- if (nr_txbbs < 0) {
+ ctrl_next = mlx4_tx_burst_segs(buf, txq, ctrl);
+ if (!ctrl_next) {
elt->buf = NULL;
break;
}
}
+ /* Handle SQ ring wrap-around. */
+ if ((volatile uint8_t *)ctrl_next >= sq->eob) {
+ ctrl_next = (volatile struct mlx4_wqe_ctrl_seg *)
+ ((volatile uint8_t *)ctrl_next - sq->size);
+ /* Flip HW valid ownership. */
+ sq->owner_opcode ^= 0x1 << MLX4_SQ_OWNER_BIT;
+ }
/*
* For raw Ethernet, the SOLICIT flag is used to indicate
* that no ICRC should be calculated.
*/
- txq->elts_comp_cd -= nr_txbbs;
- if (unlikely(txq->elts_comp_cd <= 0)) {
+ if (--txq->elts_comp_cd == 0) {
+ /* Save the completion burst end address. */
+ elt_next->eocb = (volatile uint32_t *)ctrl_next;
txq->elts_comp_cd = txq->elts_comp_cd_init;
srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
MLX4_WQE_CTRL_CQ_UPDATE);
@@ -719,17 +693,17 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* executing as soon as we do).
*/
rte_io_wmb();
- ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode |
- ((sq->head & sq->txbb_cnt) ?
- MLX4_BIT_WQE_OWN : 0));
- sq->head += nr_txbbs;
+ ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode);
elt->buf = buf;
bytes_sent += buf->pkt_len;
- elts_head = elts_head_next;
+ ctrl = ctrl_next;
+ elt = elt_next;
}
/* Take a shortcut if nothing must be sent. */
if (unlikely(i == 0))
return 0;
+ /* Save WQE address of the next Tx burst element. */
+ elt->wqe = ctrl;
/* Increment send statistics counters. */
txq->stats.opackets += i;
txq->stats.obytes += bytes_sent;
@@ -737,8 +711,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
rte_wmb();
/* Ring QP doorbell. */
rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
- txq->elts_head = elts_head;
- txq->elts_comp += i;
+ txq->elts_head += i;
return i;
}
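
With a completion now requested exactly every elts_comp_cd_init packets, each Tx CQE stands for a fixed-size batch, which is how mlx4_txq_complete() earlier in this file turns a CQE count straight into a completed-element count without walking WQE sizes. A tiny model of the countdown and the accounting (names hypothetical):

#include <stdint.h>

struct cd_sketch {
	unsigned int cd;      /* packets until the next CQE request */
	unsigned int cd_init; /* fixed request period */
};

/* Returns nonzero when this packet's WQE must set CQ_UPDATE. */
static int
tx_request_completion(struct cd_sketch *c)
{
	if (--c->cd == 0) {
		c->cd = c->cd_init;
		return 1;
	}
	return 0;
}

/* Each consumed CQE accounts for exactly cd_init elements. */
static unsigned int
completed_elts(uint32_t cqes, unsigned int cd_init)
{
	return cqes * cd_init;
}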
@@ -964,7 +937,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Update packet information. */
pkt->packet_type =
rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
- pkt->ol_flags = 0;
+ pkt->ol_flags = PKT_RX_RSS_HASH;
+ pkt->hash.rss = cqe->immed_rss_invalid;
pkt->pkt_len = len;
if (rxq->csum | rxq->csum_l2tun) {
uint32_t flags =
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 463df2b0..c12bd39a 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
#ifndef MLX4_RXTX_H_
@@ -47,7 +19,7 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
@@ -105,6 +77,10 @@ struct mlx4_rss {
/** Tx element. */
struct txq_elt {
struct rte_mbuf *buf; /**< Buffer. */
+ union {
+ volatile struct mlx4_wqe_ctrl_seg *wqe; /**< SQ WQE. */
+ volatile uint32_t *eocb; /**< End of completion burst. */
+ };
};
/** Rx queue counters. */
@@ -121,7 +97,6 @@ struct txq {
struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
unsigned int elts_head; /**< Current index in (*elts)[]. */
unsigned int elts_tail; /**< First element awaiting completion. */
- unsigned int elts_comp; /**< Number of packets awaiting completion. */
int elts_comp_cd; /**< Countdown for next completion. */
unsigned int elts_comp_cd_init; /**< Initial value for countdown. */
unsigned int elts_n; /**< (*elts)[] length. */
@@ -158,6 +133,8 @@ int mlx4_rss_attach(struct mlx4_rss *rss);
void mlx4_rss_detach(struct mlx4_rss *rss);
int mlx4_rxq_attach(struct rxq *rxq);
void mlx4_rxq_detach(struct rxq *rxq);
+uint64_t mlx4_get_rx_port_offloads(struct priv *priv);
+uint64_t mlx4_get_rx_queue_offloads(struct priv *priv);
int mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf,
@@ -177,6 +154,7 @@ uint16_t mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
/* mlx4_txq.c */
+uint64_t mlx4_get_tx_port_offloads(struct priv *priv);
int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_txconf *conf);
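
The union added to struct txq_elt overlays two pointers that are live at different times: wqe is set on the element that starts a Tx burst (the next control segment to use), while eocb is set on the element that closes a completion burst (the stamping end address). Since no element needs both at once, the union keeps the element at two machine words. A bare sketch of the layout (simplified types, anonymous union as in C11):

#include <stdint.h>

struct txq_elt_sketch {
	void *buf; /* stands in for struct rte_mbuf * */
	union {    /* one slot, two mutually exclusive roles */
		volatile void *wqe;      /* next WQE ctrl segment */
		volatile uint32_t *eocb; /* end of completion burst */
	};
};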
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 7882a4d0..071b2d5d 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
/**
@@ -41,6 +13,7 @@
#include <stddef.h>
#include <stdint.h>
#include <string.h>
+#include <inttypes.h>
/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
@@ -53,13 +26,13 @@
#include <rte_common.h>
#include <rte_errno.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include "mlx4.h"
-#include "mlx4_autoconf.h"
+#include "mlx4_glue.h"
#include "mlx4_prm.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
@@ -76,16 +49,16 @@ mlx4_txq_free_elts(struct txq *txq)
unsigned int elts_head = txq->elts_head;
unsigned int elts_tail = txq->elts_tail;
struct txq_elt (*elts)[txq->elts_n] = txq->elts;
+ unsigned int elts_m = txq->elts_n - 1;
DEBUG("%p: freeing WRs", (void *)txq);
while (elts_tail != elts_head) {
- struct txq_elt *elt = &(*elts)[elts_tail];
+ struct txq_elt *elt = &(*elts)[elts_tail++ & elts_m];
assert(elt->buf != NULL);
rte_pktmbuf_free(elt->buf);
elt->buf = NULL;
- if (++elts_tail == RTE_DIM(*elts))
- elts_tail = 0;
+ elt->wqe = NULL;
}
txq->elts_tail = txq->elts_head;
}
@@ -163,20 +136,19 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
struct mlx4_cq *cq = &txq->mcq;
struct mlx4dv_qp *dqp = mlxdv->qp.out;
struct mlx4dv_cq *dcq = mlxdv->cq.out;
- uint32_t sq_size = (uint32_t)dqp->rq.offset - (uint32_t)dqp->sq.offset;
- sq->buf = (uint8_t *)dqp->buf.buf + dqp->sq.offset;
/* Total length, including headroom and spare WQEs. */
- sq->eob = sq->buf + sq_size;
- sq->head = 0;
- sq->tail = 0;
- sq->txbb_cnt =
- (dqp->sq.wqe_cnt << dqp->sq.wqe_shift) >> MLX4_TXBB_SHIFT;
- sq->txbb_cnt_mask = sq->txbb_cnt - 1;
+ sq->size = (uint32_t)dqp->rq.offset - (uint32_t)dqp->sq.offset;
+ sq->buf = (uint8_t *)dqp->buf.buf + dqp->sq.offset;
+ sq->eob = sq->buf + sq->size;
+ uint32_t headroom_size = 2048 + (1 << dqp->sq.wqe_shift);
+ /* A contiguous headroom of this many bytes must always stay free. */
+ sq->remain_size = sq->size - headroom_size;
+ sq->owner_opcode = MLX4_OPCODE_SEND | (0 << MLX4_SQ_OWNER_BIT);
+ sq->stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
+ (0 << MLX4_SQ_OWNER_BIT));
sq->db = dqp->sdb;
sq->doorbell_qpn = dqp->doorbell_qpn;
- sq->headroom_txbbs =
- (2048 + (1 << dqp->sq.wqe_shift)) >> MLX4_TXBB_SHIFT;
cq->buf = dcq->buf.buf;
cq->cqe_cnt = dcq->cqe_cnt;
cq->set_ci_db = dcq->set_ci_db;
@@ -184,6 +156,50 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
}
/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx4_get_tx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (priv->hw_csum) {
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ }
+ if (priv->hw_csum_l2tun)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param requested
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * Nonzero when configuration is valid.
+ */
+static int
+mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
+{
+ uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
+ uint64_t supported = mlx4_get_tx_port_offloads(priv);
+
+ return !((mandatory ^ requested) & supported);
+}
+
+/**
* DPDK callback to configure a Tx queue.
*
* @param dev
@@ -208,7 +224,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mlx4dv_obj mlxdv;
struct mlx4dv_qp dv_qp;
struct mlx4dv_cq dv_cq;
- struct txq_elt (*elts)[desc];
+ struct txq_elt (*elts)[rte_align32pow2(desc)];
struct ibv_qp_init_attr qp_init_attr;
struct txq *txq;
uint8_t *bounce_buf;
@@ -231,9 +247,22 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
};
int ret;
- (void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
+ /*
+ * Don't verify port offloads for application which
+ * use the old API.
+ */
+ if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+ !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
+ rte_errno = ENOTSUP;
+ ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ mlx4_get_tx_port_offloads(priv));
+ return -rte_errno;
+ }
if (idx >= dev->data->nb_tx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -252,6 +281,12 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
ERROR("%p: invalid number of Tx descriptors", (void *)dev);
return -rte_errno;
}
+ if (desc != RTE_DIM(*elts)) {
+ desc = RTE_DIM(*elts);
+ WARN("%p: increased number of descriptors in Tx queue %u"
+ " to the next power of two (%u)",
+ (void *)dev, idx, desc);
+ }
/* Allocate and initialize Tx queue. */
mlx4_zmallocv_socket("TXQ", vec, RTE_DIM(vec), socket);
if (!txq) {
@@ -269,7 +304,6 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.elts = elts,
.elts_head = 0,
.elts_tail = 0,
- .elts_comp = 0,
/*
* Request send completion every MLX4_PMD_TX_PER_COMP_REQ
* packets or at least 4 times per ring.
@@ -278,13 +312,18 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
.elts_comp_cd_init =
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
- .csum = priv->hw_csum,
- .csum_l2tun = priv->hw_csum_l2tun,
+ .csum = priv->hw_csum &&
+ (conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM)),
+ .csum_l2tun = priv->hw_csum_l2tun &&
+ (conf->offloads &
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
/* Enable Tx loopback for VF devices. */
.lb = !!priv->vf,
.bounce_buf = bounce_buf,
};
- txq->cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+ txq->cq = mlx4_glue->create_cq(priv->ctx, desc, NULL, NULL, 0);
if (!txq->cq) {
rte_errno = ENOMEM;
ERROR("%p: CQ creation failure: %s",
@@ -304,7 +343,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/* No completion events must occur by default. */
.sq_sig_all = 0,
};
- txq->qp = ibv_create_qp(priv->pd, &qp_init_attr);
+ txq->qp = mlx4_glue->create_qp(priv->pd, &qp_init_attr);
if (!txq->qp) {
rte_errno = errno ? errno : EINVAL;
ERROR("%p: QP creation failure: %s",
@@ -312,7 +351,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
goto error;
}
txq->max_inline = qp_init_attr.cap.max_inline_data;
- ret = ibv_modify_qp
+ ret = mlx4_glue->modify_qp
(txq->qp,
&(struct ibv_qp_attr){
.qp_state = IBV_QPS_INIT,
@@ -325,7 +364,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev, strerror(rte_errno));
goto error;
}
- ret = ibv_modify_qp
+ ret = mlx4_glue->modify_qp
(txq->qp,
&(struct ibv_qp_attr){
.qp_state = IBV_QPS_RTR,
@@ -337,7 +376,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev, strerror(rte_errno));
goto error;
}
- ret = ibv_modify_qp
+ ret = mlx4_glue->modify_qp
(txq->qp,
&(struct ibv_qp_attr){
.qp_state = IBV_QPS_RTS,
@@ -354,7 +393,7 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
mlxdv.cq.out = &dv_cq;
mlxdv.qp.in = txq->qp;
mlxdv.qp.out = &dv_qp;
- ret = mlx4dv_init_obj(&mlxdv, MLX4DV_OBJ_QP | MLX4DV_OBJ_CQ);
+ ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_QP | MLX4DV_OBJ_CQ);
if (ret) {
rte_errno = EINVAL;
ERROR("%p: failed to obtain information needed for"
@@ -362,6 +401,9 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
goto error;
}
mlx4_txq_fill_dv_obj_info(txq, &mlxdv);
+ /* Save first wqe pointer in the first element. */
+ (&(*txq->elts)[0])->wqe =
+ (volatile struct mlx4_wqe_ctrl_seg *)txq->msq.buf;
/* Pre-register known mempools. */
rte_mempool_walk(mlx4_txq_mp2mr_iter, txq);
DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
@@ -401,9 +443,9 @@ mlx4_tx_queue_release(void *dpdk_txq)
}
mlx4_txq_free_elts(txq);
if (txq->qp)
- claim_zero(ibv_destroy_qp(txq->qp));
+ claim_zero(mlx4_glue->destroy_qp(txq->qp));
if (txq->cq)
- claim_zero(ibv_destroy_cq(txq->cq));
+ claim_zero(mlx4_glue->destroy_cq(txq->cq));
for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
if (!txq->mp2mr[i].mp)
break;
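
Tx ring sizes are now forced to a power of two so that index masking (elts_m = elts_n - 1) replaces modulo arithmetic; rte_align32pow2() rounds the requested descriptor count up when needed, with the WARN above telling the application. A quick self-check of that rounding:

#include <assert.h>
#include <rte_common.h>

int
main(void)
{
	assert(rte_align32pow2(500) == 512); /* rounded up */
	assert(rte_align32pow2(512) == 512); /* already a power of two */
	assert((512 & (512 - 1)) == 0);      /* masking-friendly size */
	return 0;
}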
diff --git a/drivers/net/mlx4/mlx4_utils.c b/drivers/net/mlx4/mlx4_utils.c
index f18c7145..d10812ec 100644
--- a/drivers/net/mlx4/mlx4_utils.c
+++ b/drivers/net/mlx4/mlx4_utils.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
/**
diff --git a/drivers/net/mlx4/mlx4_utils.h b/drivers/net/mlx4/mlx4_utils.h
index dc529c9c..9fdbacad 100644
--- a/drivers/net/mlx4/mlx4_utils.h
+++ b/drivers/net/mlx4/mlx4_utils.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox
*/
#ifndef MLX4_UTILS_H_
@@ -70,13 +42,7 @@ pmd_drv_log_basename(const char *s)
__func__, \
RTE_FMT_TAIL(__VA_ARGS__,)))
#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
-#ifndef MLX4_PMD_DEBUG_BROKEN_VERBS
#define claim_zero(...) assert((__VA_ARGS__) == 0)
-#else /* MLX4_PMD_DEBUG_BROKEN_VERBS */
-#define claim_zero(...) \
- (void)(((__VA_ARGS__) == 0) || \
- DEBUG("Assertion `(" # __VA_ARGS__ ") == 0' failed (IGNORED)."))
-#endif /* MLX4_PMD_DEBUG_BROKEN_VERBS */
#else /* NDEBUG */
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index a3984eb9..3bc9736c 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -33,9 +33,15 @@ include $(RTE_SDK)/mk/rte.vars.mk
# Library name.
LIB = librte_pmd_mlx5.a
+LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION)
+LIB_GLUE_BASE = librte_pmd_mlx5_glue.so
+LIB_GLUE_VERSION = 18.02.0
# Sources.
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c
+ifneq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_glue.c
+endif
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx.c
@@ -54,6 +60,10 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
+ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
+INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)
+endif
+
# Basic CFLAGS.
CFLAGS += -O3
CFLAGS += -std=c11 -Wall -Wextra
@@ -64,7 +74,14 @@ CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -Wno-strict-prototypes
+ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
+CFLAGS += -DMLX5_GLUE='"$(LIB_GLUE)"'
+CFLAGS += -DMLX5_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
+CFLAGS_mlx5_glue.o += -fPIC
+LDLIBS += -ldl
+else
LDLIBS += -libverbs -lmlx5
+endif
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
@@ -157,7 +174,24 @@ mlx5_autoconf.h: mlx5_autoconf.h.new
$(SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD):.c=.o): mlx5_autoconf.h
+# Generate the rdma-core dependency plug-in when the PMD must not be linked
+# to it directly, so that applications do not inherit this dependency.
+
+ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
+
+$(LIB): $(LIB_GLUE)
+
+$(LIB_GLUE): mlx5_glue.o
+ $Q $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) \
+ -Wl,-h,$(LIB_GLUE) \
+ -s -shared -o $@ $< -libverbs -lmlx5
+
+mlx5_glue.o: mlx5_autoconf.h
+
+endif
+
clean_mlx5: FORCE
$Q rm -f -- mlx5_autoconf.h mlx5_autoconf.h.new
+ $Q rm -f -- mlx5_glue.o $(LIB_GLUE_BASE)*
clean: clean_mlx5
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0548d17a..6c0985bd 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1,44 +1,18 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
+#include <dlfcn.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
+#include <sys/mman.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -51,11 +25,13 @@
#endif
#include <rte_malloc.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>
#include "mlx5.h"
@@ -63,6 +39,7 @@
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
+#include "mlx5_glue.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
@@ -85,18 +62,12 @@
/* Device parameter to limit the size of inlining packet. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
-/* Device parameter to enable hardware TSO offload. */
-#define MLX5_TSO "tso"
-
/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"
/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"
-/* Default PMD specific parameter value. */
-#define MLX5_ARG_UNSET (-1)
-
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
@@ -106,17 +77,6 @@
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif
-struct mlx5_args {
- int cqe_comp;
- int txq_inline;
- int txqs_inline;
- int mps;
- int mpw_hdr_dseg;
- int inline_max_packet_sz;
- int tso;
- int tx_vec_en;
- int rx_vec_en;
-};
/**
* Retrieve integer value from environment variable.
*
@@ -156,11 +116,20 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
struct priv *priv = data;
void *ret;
size_t alignment = sysconf(_SC_PAGESIZE);
+ unsigned int socket = SOCKET_ID_ANY;
+
+ if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
+ const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
+
+ socket = ctrl->socket;
+ } else if (priv->verbs_alloc_ctx.type ==
+ MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
+ const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
+ socket = ctrl->socket;
+ }
assert(data != NULL);
- assert(!mlx5_is_secondary());
- ret = rte_malloc_socket(__func__, size, alignment,
- priv->dev->device->numa_node);
+ ret = rte_malloc_socket(__func__, size, alignment, socket);
DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret);
return ret;
}
@@ -177,7 +146,6 @@ static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
assert(data != NULL);
- assert(!mlx5_is_secondary());
DEBUG("Extern free request: %p", ptr);
rte_free(ptr);
}
@@ -193,7 +161,7 @@ mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
int ret;
@@ -225,15 +193,16 @@ mlx5_dev_close(struct rte_eth_dev *dev)
}
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
- claim_zero(ibv_dealloc_pd(priv->pd));
- claim_zero(ibv_close_device(priv->ctx));
+ claim_zero(mlx5_glue->dealloc_pd(priv->pd));
+ claim_zero(mlx5_glue->close_device(priv->ctx));
} else
assert(priv->ctx == NULL);
if (priv->rss_conf.rss_key != NULL)
rte_free(priv->rss_conf.rss_key);
if (priv->reta_idx != NULL)
rte_free(priv->reta_idx);
- priv_socket_uninit(priv);
+ if (priv->primary_socket)
+ priv_socket_uninit(priv);
ret = mlx5_priv_hrxq_ibv_verify(priv);
if (ret)
WARN("%p: some Hash Rx queue still remain", (void *)priv);
@@ -303,6 +272,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
.tx_descriptor_status = mlx5_tx_descriptor_status,
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
+ .is_removed = mlx5_is_removed,
};
static const struct eth_dev_ops mlx5_dev_sec_ops = {
@@ -350,6 +320,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
.tx_descriptor_status = mlx5_tx_descriptor_status,
.rx_queue_intr_enable = mlx5_rx_intr_enable,
.rx_queue_intr_disable = mlx5_rx_intr_disable,
+ .is_removed = mlx5_is_removed,
};
static struct {
@@ -401,7 +372,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
- struct mlx5_args *args = opaque;
+ struct mlx5_dev_config *config = opaque;
unsigned long tmp;
errno = 0;
@@ -411,23 +382,21 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
return errno;
}
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
- args->cqe_comp = !!tmp;
+ config->cqe_comp = !!tmp;
} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
- args->txq_inline = tmp;
+ config->txq_inline = tmp;
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
- args->txqs_inline = tmp;
+ config->txqs_inline = tmp;
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
- args->mps = !!tmp;
+ config->mps = !!tmp ? config->mps : 0;
} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
- args->mpw_hdr_dseg = !!tmp;
+ config->mpw_hdr_dseg = !!tmp;
} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
- args->inline_max_packet_sz = tmp;
- } else if (strcmp(MLX5_TSO, key) == 0) {
- args->tso = !!tmp;
+ config->inline_max_packet_sz = tmp;
} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
- args->tx_vec_en = !!tmp;
+ config->tx_vec_en = !!tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
- args->rx_vec_en = !!tmp;
+ config->rx_vec_en = !!tmp;
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
@@ -438,8 +407,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
/**
* Parse device parameters.
*
- * @param priv
- * Pointer to private structure.
+ * @param config
+ * Pointer to device configuration structure.
* @param devargs
* Device arguments structure.
*
@@ -447,7 +416,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
* 0 on success, errno value on failure.
*/
static int
-mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
+mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
const char **params = (const char *[]){
MLX5_RXQ_CQE_COMP_EN,
@@ -456,7 +425,6 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
MLX5_TXQ_MPW_EN,
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
- MLX5_TSO,
MLX5_TX_VEC_EN,
MLX5_RX_VEC_EN,
NULL,
@@ -475,7 +443,7 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
for (i = 0; (params[i] != NULL); ++i) {
if (rte_kvargs_count(kvlist, params[i])) {
ret = rte_kvargs_process(kvlist, params[i],
- mlx5_args_check, args);
+ mlx5_args_check, config);
if (ret != 0) {
rte_kvargs_free(kvlist);
return ret;
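The reworked mlx5_args() above follows the standard rte_kvargs flow: parse the devargs string against a key whitelist, then dispatch each recognized key to mlx5_args_check() with the configuration structure passed as the opaque context. A minimal sketch of that flow, assuming the DPDK rte_kvargs API and a hypothetical "my_arg" key with a parse_devargs() helper:

#include <errno.h>
#include <stdlib.h>
#include <rte_kvargs.h>

/* Hypothetical handler mirroring mlx5_args_check(): convert the value
 * and store it through the opaque pointer. */
static int
my_arg_check(const char *key, const char *val, void *opaque)
{
	int *out = opaque;

	(void)key;
	*out = atoi(val);
	return 0;
}

static int
parse_devargs(const char *devargs, int *out)
{
	static const char *const keys[] = { "my_arg", NULL };
	struct rte_kvargs *kvlist = rte_kvargs_parse(devargs, keys);
	int ret;

	if (kvlist == NULL)
		return -EINVAL;
	ret = rte_kvargs_process(kvlist, "my_arg", my_arg_check, out);
	rte_kvargs_free(kvlist);
	return ret;
}

A caller would invoke parse_devargs("my_arg=5", &value) and receive the converted value through the opaque pointer, just as mlx5_args_check() fills struct mlx5_dev_config.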
@@ -488,36 +456,104 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
static struct rte_pci_driver mlx5_driver;
+/*
+ * Reserved UAR address space for TXQ UAR (HW doorbell) mapping; a
+ * process-local resource used by both primary and secondary processes
+ * to avoid duplicate reservations.
+ * The space has to be available in both primary and secondary
+ * processes; TXQ UARs map to this area with a fixed mmap and no double
+ * check.
+ */
+static void *uar_base;
+
/**
- * Assign parameters from args into priv, only non default
- * values are considered.
+ * Reserve UAR address space for primary process.
*
- * @param[out] priv
+ * @param[in] priv
* Pointer to private structure.
- * @param[in] args
- * Pointer to args values.
+ *
+ * @return
+ * 0 on success, errno value on failure.
*/
-static void
-mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
+static int
+priv_uar_init_primary(struct priv *priv)
{
- if (args->cqe_comp != MLX5_ARG_UNSET)
- priv->cqe_comp = args->cqe_comp;
- if (args->txq_inline != MLX5_ARG_UNSET)
- priv->txq_inline = args->txq_inline;
- if (args->txqs_inline != MLX5_ARG_UNSET)
- priv->txqs_inline = args->txqs_inline;
- if (args->mps != MLX5_ARG_UNSET)
- priv->mps = args->mps ? priv->mps : 0;
- if (args->mpw_hdr_dseg != MLX5_ARG_UNSET)
- priv->mpw_hdr_dseg = args->mpw_hdr_dseg;
- if (args->inline_max_packet_sz != MLX5_ARG_UNSET)
- priv->inline_max_packet_sz = args->inline_max_packet_sz;
- if (args->tso != MLX5_ARG_UNSET)
- priv->tso = args->tso;
- if (args->tx_vec_en != MLX5_ARG_UNSET)
- priv->tx_vec_en = args->tx_vec_en;
- if (args->rx_vec_en != MLX5_ARG_UNSET)
- priv->rx_vec_en = args->rx_vec_en;
+ void *addr = (void *)0;
+ int i;
+ const struct rte_mem_config *mcfg;
+ int ret;
+
+ if (uar_base) { /* UAR address space mapped. */
+ priv->uar_base = uar_base;
+ return 0;
+ }
+	/* Find the lower bound of hugepage segments. */
+ mcfg = rte_eal_get_configuration()->mem_config;
+ for (i = 0; i < RTE_MAX_MEMSEG && mcfg->memseg[i].addr; i++) {
+ if (addr)
+ addr = RTE_MIN(addr, mcfg->memseg[i].addr);
+ else
+ addr = mcfg->memseg[i].addr;
+ }
+ /* keep distance to hugepages to minimize potential conflicts. */
+ addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
+ /* anonymous mmap, no real memory consumption. */
+ addr = mmap(addr, MLX5_UAR_SIZE,
+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ ERROR("Failed to reserve UAR address space, please adjust "
+ "MLX5_UAR_SIZE or try --base-virtaddr");
+ ret = ENOMEM;
+ return ret;
+ }
+	/* Accept either the same addr or a new addr returned from mmap if
+	 * the target range is occupied.
+	 */
+ INFO("Reserved UAR address space: %p", addr);
+ priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
+ uar_base = addr; /* process local, don't reserve again. */
+ return 0;
+}
+
+/**
+ * Reserve UAR address space for secondary process, align with
+ * primary process.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_uar_init_secondary(struct priv *priv)
+{
+ void *addr;
+ int ret;
+
+ assert(priv->uar_base);
+ if (uar_base) { /* already reserved. */
+ assert(uar_base == priv->uar_base);
+ return 0;
+ }
+ /* anonymous mmap, no real memory consumption. */
+ addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ ERROR("UAR mmap failed: %p size: %llu",
+ priv->uar_base, MLX5_UAR_SIZE);
+ ret = ENXIO;
+ return ret;
+ }
+ if (priv->uar_base != addr) {
+ ERROR("UAR address %p size %llu occupied, please adjust "
+ "MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr",
+ priv->uar_base, MLX5_UAR_SIZE);
+ ret = ENXIO;
+ return ret;
+ }
+ uar_base = addr; /* process local, don't reserve again */
+ INFO("Reserved UAR address space: %p", addr);
+ return 0;
}
/**
@@ -565,7 +601,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
/* Save PCI address. */
mlx5_dev[idx].pci_addr = pci_dev->addr;
- list = ibv_get_device_list(&i);
+ list = mlx5_glue->get_device_list(&i);
if (list == NULL) {
assert(errno);
if (errno == ENOSYS)
@@ -615,12 +651,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
" (SR-IOV: %s)",
list[i]->name,
sriov ? "true" : "false");
- attr_ctx = ibv_open_device(list[i]);
+ attr_ctx = mlx5_glue->open_device(list[i]);
err = errno;
break;
}
if (attr_ctx == NULL) {
- ibv_free_device_list(list);
+ mlx5_glue->free_device_list(list);
switch (err) {
case 0:
ERROR("cannot access device, is mlx5_ib loaded?");
@@ -639,7 +675,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
* Multi-packet send is supported by ConnectX-4 Lx PF as well
* as all ConnectX-5 devices.
*/
- mlx5dv_query_device(attr_ctx, &attrs_out);
+ mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
DEBUG("Enhanced MPW is supported");
@@ -657,11 +693,13 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
cqe_comp = 0;
else
cqe_comp = 1;
- if (ibv_query_device_ex(attr_ctx, NULL, &device_attr))
+ if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr))
goto error;
INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
+ char name[RTE_ETH_NAME_MAX_LEN];
+ int len;
uint32_t port = i + 1; /* ports are indexed from one */
uint32_t test = (1 << i);
struct ibv_context *ctx = NULL;
@@ -673,26 +711,27 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ether_addr mac;
uint16_t num_vfs = 0;
struct ibv_device_attr_ex device_attr;
- struct mlx5_args args = {
- .cqe_comp = MLX5_ARG_UNSET,
+ struct mlx5_dev_config config = {
+ .cqe_comp = cqe_comp,
+ .mps = mps,
+ .tunnel_en = tunnel_en,
+ .tx_vec_en = 1,
+ .rx_vec_en = 1,
+ .mpw_hdr_dseg = 0,
.txq_inline = MLX5_ARG_UNSET,
.txqs_inline = MLX5_ARG_UNSET,
- .mps = MLX5_ARG_UNSET,
- .mpw_hdr_dseg = MLX5_ARG_UNSET,
.inline_max_packet_sz = MLX5_ARG_UNSET,
- .tso = MLX5_ARG_UNSET,
- .tx_vec_en = MLX5_ARG_UNSET,
- .rx_vec_en = MLX5_ARG_UNSET,
};
- mlx5_dev[idx].ports |= test;
+ len = snprintf(name, sizeof(name), PCI_PRI_FMT,
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+ if (device_attr.orig_attr.phys_port_cnt > 1)
+			snprintf(name + len, sizeof(name) - len, " port %u", i);
- if (mlx5_is_secondary()) {
- /* from rte_ethdev.c */
- char name[RTE_ETH_NAME_MAX_LEN];
+ mlx5_dev[idx].ports |= test;
- snprintf(name, sizeof(name), "%s port %u",
- ibv_get_device_name(ibv_dev), port);
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (eth_dev == NULL) {
ERROR("can not attach rte ethdev");
@@ -702,6 +741,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->device = &pci_dev->device;
eth_dev->dev_ops = &mlx5_dev_sec_ops;
priv = eth_dev->data->dev_private;
+ err = priv_uar_init_secondary(priv);
+ if (err < 0) {
+ err = -err;
+ goto error;
+ }
/* Receive command fd from primary process */
err = priv_socket_connect(priv);
if (err < 0) {
@@ -710,26 +754,31 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
}
/* Remap UAR for Tx queues. */
err = priv_tx_uar_remap(priv, err);
- if (err < 0) {
- err = -err;
+ if (err)
goto error;
- }
- priv_dev_select_rx_function(priv, eth_dev);
- priv_dev_select_tx_function(priv, eth_dev);
+ /*
+ * Ethdev pointer is still required as input since
+ * the primary device is not accessible from the
+ * secondary process.
+ */
+ eth_dev->rx_pkt_burst =
+ priv_select_rx_function(priv, eth_dev);
+ eth_dev->tx_pkt_burst =
+ priv_select_tx_function(priv, eth_dev);
continue;
}
DEBUG("using port %u (%08" PRIx32 ")", port, test);
- ctx = ibv_open_device(ibv_dev);
+ ctx = mlx5_glue->open_device(ibv_dev);
if (ctx == NULL) {
err = ENODEV;
goto port_error;
}
- ibv_query_device_ex(ctx, NULL, &device_attr);
+ mlx5_glue->query_device_ex(ctx, NULL, &device_attr);
/* Check port status. */
- err = ibv_query_port(ctx, port, &port_attr);
+ err = mlx5_glue->query_port(ctx, port, &port_attr);
if (err) {
ERROR("port query failed: %s", strerror(err));
goto port_error;
@@ -744,11 +793,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (port_attr.state != IBV_PORT_ACTIVE)
DEBUG("port %d is not active: \"%s\" (%d)",
- port, ibv_port_state_str(port_attr.state),
+ port, mlx5_glue->port_state_str(port_attr.state),
port_attr.state);
/* Allocate protection domain. */
- pd = ibv_alloc_pd(ctx);
+ pd = mlx5_glue->alloc_pd(ctx);
if (pd == NULL) {
ERROR("PD allocation failure");
err = ENOMEM;
@@ -774,107 +823,86 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->port = port;
priv->pd = pd;
priv->mtu = ETHER_MTU;
- priv->mps = mps; /* Enable MPW by default if supported. */
- priv->cqe_comp = cqe_comp;
- priv->tunnel_en = tunnel_en;
- /* Enable vector by default if supported. */
- priv->tx_vec_en = 1;
- priv->rx_vec_en = 1;
- err = mlx5_args(&args, pci_dev->device.devargs);
+ err = mlx5_args(&config, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
strerror(err));
goto port_error;
}
- mlx5_args_assign(priv, &args);
- if (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {
+ if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) {
ERROR("ibv_query_device_ex() failed");
goto port_error;
}
- priv->hw_csum =
- !!(device_attr_ex.device_cap_flags_ex &
- IBV_DEVICE_RAW_IP_CSUM);
+ config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
+ IBV_DEVICE_RAW_IP_CSUM);
DEBUG("checksum offloading is %ssupported",
- (priv->hw_csum ? "" : "not "));
+ (config.hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
- priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
- IBV_DEVICE_VXLAN_SUPPORT);
+ config.hw_csum_l2tun =
+ !!(exp_device_attr.exp_device_cap_flags &
+ IBV_DEVICE_VXLAN_SUPPORT);
#endif
- DEBUG("L2 tunnel checksum offloads are %ssupported",
- (priv->hw_csum_l2tun ? "" : "not "));
+ DEBUG("Rx L2 tunnel checksum offloads are %ssupported",
+ (config.hw_csum_l2tun ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- priv->counter_set_supported = !!(device_attr.max_counter_sets);
- ibv_describe_counter_set(ctx, 0, &cs_desc);
+ config.flow_counter_en = !!(device_attr.max_counter_sets);
+ mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
cs_desc.counter_type, cs_desc.num_of_cs,
cs_desc.attributes);
#endif
- priv->ind_table_max_size =
+ config.ind_table_max_size =
device_attr_ex.rss_caps.max_rwq_indirection_table_size;
/* Remove this check once DPDK supports larger/variable
* indirection tables. */
- if (priv->ind_table_max_size >
+ if (config.ind_table_max_size >
(unsigned int)ETH_RSS_RETA_SIZE_512)
- priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+ config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
DEBUG("maximum RX indirection table size is %u",
- priv->ind_table_max_size);
- priv->hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
+ config.ind_table_max_size);
+ config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
DEBUG("VLAN stripping is %ssupported",
- (priv->hw_vlan_strip ? "" : "not "));
+ (config.hw_vlan_strip ? "" : "not "));
- priv->hw_fcs_strip =
- !!(device_attr_ex.orig_attr.device_cap_flags &
- IBV_WQ_FLAGS_SCATTER_FCS);
+ config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_SCATTER_FCS);
DEBUG("FCS stripping configuration is %ssupported",
- (priv->hw_fcs_strip ? "" : "not "));
+ (config.hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
- priv->hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
+ config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
DEBUG("hardware RX end alignment padding is %ssupported",
- (priv->hw_padding ? "" : "not "));
+ (config.hw_padding ? "" : "not "));
priv_get_num_vfs(priv, &num_vfs);
- priv->sriov = (num_vfs || sriov);
- priv->tso = ((priv->tso) &&
- (device_attr_ex.tso_caps.max_tso > 0) &&
- (device_attr_ex.tso_caps.supported_qpts &
- (1 << IBV_QPT_RAW_PACKET)));
- if (priv->tso)
- priv->max_tso_payload_sz =
- device_attr_ex.tso_caps.max_tso;
- if (priv->mps && !mps) {
+ config.sriov = (num_vfs || sriov);
+ config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
+ (device_attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
+ if (config.tso)
+ config.tso_max_payload_sz =
+ device_attr_ex.tso_caps.max_tso;
+ if (config.mps && !mps) {
ERROR("multi-packet send not supported on this device"
" (" MLX5_TXQ_MPW_EN ")");
err = ENOTSUP;
goto port_error;
- } else if (priv->mps && priv->tso) {
- WARN("multi-packet send not supported in conjunction "
- "with TSO. MPS disabled");
- priv->mps = 0;
}
INFO("%sMPS is %s",
- priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
- priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
- /* Set default values for Enhanced MPW, a.k.a MPWv2. */
- if (priv->mps == MLX5_MPW_ENHANCED) {
- if (args.txqs_inline == MLX5_ARG_UNSET)
- priv->txqs_inline = MLX5_EMPW_MIN_TXQS;
- if (args.inline_max_packet_sz == MLX5_ARG_UNSET)
- priv->inline_max_packet_sz =
- MLX5_EMPW_MAX_INLINE_LEN;
- if (args.txq_inline == MLX5_ARG_UNSET)
- priv->txq_inline = MLX5_WQE_SIZE_MAX -
- MLX5_WQE_SIZE;
- }
- if (priv->cqe_comp && !cqe_comp) {
+ config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
+ config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ if (config.cqe_comp && !cqe_comp) {
WARN("Rx CQE compression isn't supported");
- priv->cqe_comp = 0;
+ config.cqe_comp = 0;
}
+ err = priv_uar_init_primary(priv);
+ if (err)
+ goto port_error;
/* Configure the first MAC address by default. */
if (priv_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx5_en loaded?"
@@ -902,14 +930,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv_get_mtu(priv, &priv->mtu);
DEBUG("port %u MTU is %u", priv->port, priv->mtu);
- /* from rte_ethdev.c */
- {
- char name[RTE_ETH_NAME_MAX_LEN];
-
- snprintf(name, sizeof(name), "%s port %u",
- ibv_get_device_name(ibv_dev), port);
- eth_dev = rte_eth_dev_allocate(name);
- }
+ eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL) {
ERROR("can not allocate rte ethdev");
err = ENOMEM;
@@ -920,6 +941,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->device->driver = &mlx5_driver.driver;
+ /*
+ * Initialize burst functions to prevent crashes before link-up.
+ */
+ eth_dev->rx_pkt_burst = removed_rx_burst;
+ eth_dev->tx_pkt_burst = removed_tx_burst;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx5_dev_ops;
/* Register MAC address. */
@@ -933,22 +959,24 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
.free = &mlx5_free_verbs_buf,
.data = priv,
};
- mlx5dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
- (void *)((uintptr_t)&alctr));
+ mlx5_glue->dv_set_context_attr(ctx,
+ MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
+ (void *)((uintptr_t)&alctr));
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
priv_set_flags(priv, ~IFF_UP, IFF_UP);
- mlx5_link_update(priv->dev, 1);
+ /* Store device configuration on private structure. */
+ priv->config = config;
continue;
port_error:
if (priv)
rte_free(priv);
if (pd)
- claim_zero(ibv_dealloc_pd(pd));
+ claim_zero(mlx5_glue->dealloc_pd(pd));
if (ctx)
- claim_zero(ibv_close_device(ctx));
+ claim_zero(mlx5_glue->close_device(ctx));
break;
}
@@ -967,9 +995,9 @@ port_error:
error:
if (attr_ctx)
- claim_zero(ibv_close_device(attr_ctx));
+ claim_zero(mlx5_glue->close_device(attr_ctx));
if (list)
- ibv_free_device_list(list);
+ mlx5_glue->free_device_list(list);
assert(err >= 0);
return -err;
}
@@ -1021,6 +1049,88 @@ static struct rte_pci_driver mlx5_driver = {
.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};
+#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
+
+/**
+ * Initialization routine for run-time dependency on rdma-core.
+ */
+static int
+mlx5_glue_init(void)
+{
+ const char *path[] = {
+ /*
+ * A basic security check is necessary before trusting
+ * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
+ */
+ (geteuid() == getuid() && getegid() == getgid() ?
+ getenv("MLX5_GLUE_PATH") : NULL),
+ RTE_EAL_PMD_PATH,
+ };
+ unsigned int i = 0;
+ void *handle = NULL;
+ void **sym;
+ const char *dlmsg;
+
+ while (!handle && i != RTE_DIM(path)) {
+ const char *end;
+ size_t len;
+ int ret;
+
+ if (!path[i]) {
+ ++i;
+ continue;
+ }
+ end = strpbrk(path[i], ":;");
+ if (!end)
+ end = path[i] + strlen(path[i]);
+ len = end - path[i];
+ ret = 0;
+ do {
+ char name[ret + 1];
+
+ ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
+ (int)len, path[i],
+ (!len || *(end - 1) == '/') ? "" : "/");
+ if (ret == -1)
+ break;
+ if (sizeof(name) != (size_t)ret + 1)
+ continue;
+ DEBUG("looking for rdma-core glue as \"%s\"", name);
+ handle = dlopen(name, RTLD_LAZY);
+ break;
+ } while (1);
+ path[i] = end + 1;
+ if (!*end)
+ ++i;
+ }
+ if (!handle) {
+ rte_errno = EINVAL;
+ dlmsg = dlerror();
+ if (dlmsg)
+ WARN("cannot load glue library: %s", dlmsg);
+ goto glue_error;
+ }
+ sym = dlsym(handle, "mlx5_glue");
+ if (!sym || !*sym) {
+ rte_errno = EINVAL;
+ dlmsg = dlerror();
+ if (dlmsg)
+ ERROR("cannot resolve glue symbol: %s", dlmsg);
+ goto glue_error;
+ }
+ mlx5_glue = *sym;
+ return 0;
+glue_error:
+ if (handle)
+ dlclose(handle);
+ WARN("cannot initialize PMD due to missing run-time"
+ " dependency on rdma-core libraries (libibverbs,"
+ " libmlx5)");
+ return -rte_errno;
+}
+
+#endif
+
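mlx5_glue_init() above resolves a single exported symbol that is itself a pointer to a structure of function pointers, hence the double check and extra dereference on "sym". A reduced sketch of that dlopen()/dlsym() pattern, with hypothetical names (libglue.so, struct glue_iface, the "glue_iface" symbol):

#include <dlfcn.h>
#include <stdio.h>

/* Hypothetical glue interface; the real one is struct mlx5_glue. */
struct glue_iface {
	const char *version;
};

int main(void)
{
	void *handle = dlopen("libglue.so", RTLD_LAZY); /* placeholder name */
	struct glue_iface **sym;

	if (handle == NULL) {
		fprintf(stderr, "dlopen: %s\n", dlerror());
		return 1;
	}
	/* The exported symbol is a pointer to the structure, not the
	 * structure itself, so dereference once more before use. */
	sym = dlsym(handle, "glue_iface");
	if (sym != NULL && *sym != NULL)
		printf("glue version: %s\n", (*sym)->version);
	dlclose(handle);
	return 0;
}

Built with -ldl, this mirrors the lookup performed before the version check and rte_pci_register() below.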
/**
* Driver initialization routine.
*/
@@ -1040,7 +1150,26 @@ rte_mlx5_pmd_init(void)
/* Match the size of Rx completion entry to the size of a cacheline. */
if (RTE_CACHE_LINE_SIZE == 128)
setenv("MLX5_CQE_SIZE", "128", 0);
- ibv_fork_init();
+#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
+ if (mlx5_glue_init())
+ return;
+ assert(mlx5_glue);
+#endif
+#ifndef NDEBUG
+ /* Glue structure must not contain any NULL pointers. */
+ {
+ unsigned int i;
+
+ for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
+ assert(((const void *const *)mlx5_glue)[i]);
+ }
+#endif
+ if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
+ ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required",
+ mlx5_glue->version, MLX5_GLUE_VERSION);
+ return;
+ }
+ mlx5_glue->fork_init();
rte_pci_register(&mlx5_driver);
}
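The NDEBUG-guarded loop in rte_mlx5_pmd_init() treats the glue structure as a flat array of pointers to verify that none of them is NULL before first use. That only works because the structure holds nothing but pointer members, which is an assumption worth restating; a small sketch with a made-up two-member interface:

#include <assert.h>
#include <stddef.h>

/* Assumption: the structure holds only pointer members, as struct
 * mlx5_glue does, so it can be scanned as an array of void pointers. */
struct glue_iface {
	const char *version;
	void (*fork_init)(void);
};

static void dummy_fork_init(void) {}

int main(void)
{
	const struct glue_iface iface = { "18.02.0", dummy_fork_init };
	size_t i;

	for (i = 0; i != sizeof(iface) / sizeof(void *); ++i)
		assert(((const void *const *)&iface)[i] != NULL);
	return 0;
}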
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e6a69b82..965c19f2 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#ifndef RTE_PMD_MLX5_H_
@@ -53,7 +25,7 @@
#include <rte_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_spinlock.h>
#include <rte_interrupts.h>
#include <rte_errno.h>
@@ -90,6 +62,57 @@ struct mlx5_xstats_ctrl {
/* Flow list . */
TAILQ_HEAD(mlx5_flows, rte_flow);
+/* Default PMD specific parameter value. */
+#define MLX5_ARG_UNSET (-1)
+
+/*
+ * Device configuration structure.
+ *
+ * Merged configuration from:
+ *
+ * - Device capabilities,
+ * - Features disabled by user device parameters.
+ */
+struct mlx5_dev_config {
+ unsigned int hw_csum:1; /* Checksum offload is supported. */
+ unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
+ unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
+ unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
+ unsigned int hw_padding:1; /* End alignment padding is supported. */
+ unsigned int sriov:1; /* This is a VF or PF with VF devices. */
+ unsigned int mps:2; /* Multi-packet send supported mode. */
+ unsigned int tunnel_en:1; /* Whether tunnel is supported. */
+ unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
+ unsigned int cqe_comp:1; /* CQE compression is enabled. */
+ unsigned int tso:1; /* Whether TSO is supported. */
+ unsigned int tx_vec_en:1; /* Tx vector is enabled. */
+ unsigned int rx_vec_en:1; /* Rx vector is enabled. */
+ unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+ unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
+ unsigned int ind_table_max_size; /* Maximum indirection table size. */
+ int txq_inline; /* Maximum packet size for inlining. */
+ int txqs_inline; /* Queue number threshold for inlining. */
+ int inline_max_packet_sz; /* Max packet size for inlining. */
+};
+
+/**
+ * Type of object being allocated.
+ */
+enum mlx5_verbs_alloc_type {
+ MLX5_VERBS_ALLOC_TYPE_NONE,
+ MLX5_VERBS_ALLOC_TYPE_TX_QUEUE,
+ MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
+};
+
+/**
+ * The Verbs allocator needs a context to know, in its callback, which kind
+ * of resource it is allocating.
+ */
+struct mlx5_verbs_alloc_ctx {
+ enum mlx5_verbs_alloc_type type; /* Kind of object being allocated. */
+ const void *obj; /* Pointer to the DPDK object. */
+};
+
struct priv {
struct rte_eth_dev *dev; /* Ethernet device of master process. */
struct ibv_context *ctx; /* Verbs context. */
@@ -102,33 +125,13 @@ struct priv {
/* Device properties. */
uint16_t mtu; /* Configured MTU. */
uint8_t port; /* Physical port number. */
- unsigned int hw_csum:1; /* Checksum offload is supported. */
- unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
- unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
- unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
- unsigned int hw_padding:1; /* End alignment padding is supported. */
- unsigned int sriov:1; /* This is a VF or PF with VF devices. */
- unsigned int mps:2; /* Multi-packet send mode (0: disabled). */
- unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
- unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
unsigned int pending_alarm:1; /* An alarm is pending. */
- unsigned int tso:1; /* Whether TSO is supported. */
- unsigned int tunnel_en:1;
unsigned int isolated:1; /* Whether isolated mode is enabled. */
- unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */
- unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */
- unsigned int counter_set_supported:1; /* Counter set is supported. */
- /* Whether Tx offloads for tunneled packets are supported. */
- unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
- unsigned int txq_inline; /* Maximum packet size for inlining. */
- unsigned int txqs_inline; /* Queue number threshold for inlining. */
- unsigned int inline_max_packet_sz; /* Max packet size for inlining. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
- unsigned int ind_table_max_size; /* Maximum indirection table size. */
struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
struct rte_intr_handle intr_handle; /* Interrupt handler. */
unsigned int (*reta_idx)[]; /* RETA index table. */
@@ -148,7 +151,11 @@ struct priv {
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
rte_spinlock_t lock; /* Lock for control functions. */
int primary_socket; /* Unix socket for primary process. */
+ void *uar_base; /* Reserved address space for UAR mapping */
struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
+ struct mlx5_dev_config config; /* Device configuration. */
+ struct mlx5_verbs_alloc_ctx verbs_alloc_ctx;
+ /* Context for Verbs allocator. */
};
/**
@@ -165,6 +172,22 @@ priv_lock(struct priv *priv)
}
/**
+ * Try to lock private structure to protect it from concurrent access in the
+ * control path.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int
+priv_trylock(struct priv *priv)
+{
+ return rte_spinlock_trylock(&priv->lock);
+}
+
+/**
* Unlock private structure.
*
* @param priv
@@ -194,6 +217,8 @@ int priv_set_flags(struct priv *, unsigned int, unsigned int);
int mlx5_dev_configure(struct rte_eth_dev *);
void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int priv_link_update(struct priv *, int);
+int priv_force_link_status_change(struct priv *, int);
int mlx5_link_update(struct rte_eth_dev *, int);
int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t);
int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
@@ -206,8 +231,9 @@ void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
-void priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev);
-void priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev);
+int mlx5_is_removed(struct rte_eth_dev *dev);
+eth_tx_burst_t priv_select_tx_function(struct priv *, struct rte_eth_dev *);
+eth_rx_burst_t priv_select_rx_function(struct priv *, struct rte_eth_dev *);
/* mlx5_mac.c */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 3a7706cf..c3334ca3 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -1,39 +1,13 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#ifndef RTE_PMD_MLX5_DEFS_H_
#define RTE_PMD_MLX5_DEFS_H_
+#include <rte_ethdev_driver.h>
+
#include "mlx5_autoconf.h"
/* Reported driver name. */
@@ -105,4 +79,20 @@
/* Number of packets vectorized Rx can simultaneously process in a loop. */
#define MLX5_VPMD_DESCS_PER_LOOP 4
+/* Supported RSS */
+#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+
+/* Maximum number of attempts to query link status before giving up. */
+#define MLX5_MAX_LINK_QUERY_ATTEMPTS 5
+
+/* Reserved address space for UAR mapping. */
+#define MLX5_UAR_SIZE (1ULL << 32)
+
+/* Offset of the reserved UAR address space relative to hugepage memory. The
+ * offset minimizes the chance that an address adjacent to hugepages is used
+ * by other code in either the primary or secondary process; failing to map
+ * the TX UAR would make TX packets invisible to the HW.
+ */
+#define MLX5_UAR_OFFSET (1ULL << 32)
+
#endif /* RTE_PMD_MLX5_DEFS_H_ */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index a3cef689..66650769 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#define _GNU_SOURCE
@@ -55,7 +27,7 @@
#include <sys/un.h>
#include <rte_atomic.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_common.h>
@@ -64,6 +36,7 @@
#include <rte_malloc.h>
#include "mlx5.h"
+#include "mlx5_glue.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
@@ -119,33 +92,6 @@ struct ethtool_link_settings {
#endif
/**
- * Return private structure associated with an Ethernet device.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * Pointer to private structure.
- */
-struct priv *
-mlx5_get_priv(struct rte_eth_dev *dev)
-{
- return dev->data->dev_private;
-}
-
-/**
- * Check if running as a secondary process.
- *
- * @return
- * Nonzero if running as a secondary process.
- */
-inline int
-mlx5_is_secondary(void)
-{
- return rte_eal_process_type() == RTE_PROC_SECONDARY;
-}
-
-/**
* Get interface name from private structure.
*
* @param[in] priv
@@ -577,8 +523,26 @@ dev_configure(struct rte_eth_dev *dev)
unsigned int j;
unsigned int reta_idx_n;
const uint8_t use_app_rss_key =
- !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
-
+ !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+ uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t supp_rx_offloads =
+ (mlx5_priv_get_rx_port_offloads(priv) |
+ mlx5_priv_get_rx_queue_offloads(priv));
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+
+ if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+ ERROR("Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ tx_offloads, supp_tx_offloads);
+ return ENOTSUP;
+ }
+ if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
+ ERROR("Some Rx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ rx_offloads, supp_rx_offloads);
+ return ENOTSUP;
+ }
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
@@ -606,7 +570,7 @@ dev_configure(struct rte_eth_dev *dev)
(void *)dev, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
- if (rxqs_n > priv->ind_table_max_size) {
+ if (rxqs_n > priv->config.ind_table_max_size) {
ERROR("cannot handle this many RX queues (%u)", rxqs_n);
return EINVAL;
}
@@ -619,7 +583,7 @@ dev_configure(struct rte_eth_dev *dev)
* maximum indirection table size for better balancing.
* The result is always rounded to the next power of two. */
reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
- priv->ind_table_max_size :
+ priv->config.ind_table_max_size :
rxqs_n));
if (priv_rss_reta_index_resize(priv, reta_idx_n))
return ENOMEM;
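The offload validation added to dev_configure() above is a plain bitmask subset test: the request is acceptable only if masking it with the supported set leaves it unchanged. In isolation, with made-up bit values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t supported = UINT64_C(0x7); /* made-up capability bits */
	uint64_t requested = UINT64_C(0xd);

	/* Every requested bit must also be present in the supported mask. */
	if ((requested & supported) != requested)
		printf("unsupported offload bits: 0x%" PRIx64 "\n",
		       requested & ~supported);
	return 0;
}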
@@ -649,9 +613,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
int ret;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
-
priv_lock(priv);
ret = dev_configure(dev);
assert(ret >= 0);
@@ -670,7 +631,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
unsigned int max;
char ifname[IF_NAMESIZE];
@@ -692,34 +654,18 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_offload_capa =
- (priv->hw_csum ?
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM) :
- 0) |
- (priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
- DEV_RX_OFFLOAD_TIMESTAMP;
-
- if (!priv->mps)
- info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
- if (priv->hw_csum)
- info->tx_offload_capa |=
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
- if (priv->tso)
- info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
- if (priv->tunnel_en)
- info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ info->rx_queue_offload_capa =
+ mlx5_priv_get_rx_queue_offloads(priv);
+ info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) |
+ info->rx_queue_offload_capa);
+ info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
- priv->reta_idx_n : priv->ind_table_max_size;
+ priv->reta_idx_n : config->ind_table_max_size;
info->hash_key_size = priv->rss_conf.rss_key_len;
info->speed_capa = priv->link_speed_capa;
+ info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
priv_unlock(priv);
}
@@ -761,7 +707,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
struct ethtool_cmd edata = {
.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
};
@@ -827,7 +773,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
struct ifreq ifr;
struct rte_eth_link dev_link;
@@ -913,25 +859,131 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
}
/**
- * DPDK callback to retrieve physical link information.
+ * Enable receiving and transmitting traffic.
*
- * @param dev
- * Pointer to Ethernet device structure.
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_link_start(struct priv *priv)
+{
+ struct rte_eth_dev *dev = priv->dev;
+ int err;
+
+ dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
+ dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
+ err = priv_dev_traffic_enable(priv, dev);
+ if (err)
+ ERROR("%p: error occurred while configuring control flows: %s",
+ (void *)priv, strerror(err));
+ err = priv_flow_start(priv, &priv->flows);
+ if (err)
+ ERROR("%p: error occurred while configuring flows: %s",
+ (void *)priv, strerror(err));
+}
+
+/**
+ * Disable receiving and transmitting traffic.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_link_stop(struct priv *priv)
+{
+ struct rte_eth_dev *dev = priv->dev;
+
+ priv_flow_stop(priv, &priv->flows);
+ priv_dev_traffic_disable(priv, dev);
+ dev->rx_pkt_burst = removed_rx_burst;
+ dev->tx_pkt_burst = removed_tx_burst;
+}
+
+/**
+ * Retrieve physical link information and update rx/tx_pkt_burst callbacks
+ * accordingly.
+ *
+ * @param priv
+ * Pointer to private structure.
* @param wait_to_complete
* Wait for request completion (ignored).
*/
int
-mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+priv_link_update(struct priv *priv, int wait_to_complete)
{
+ struct rte_eth_dev *dev = priv->dev;
struct utsname utsname;
int ver[3];
+ int ret;
+ struct rte_eth_link dev_link = dev->data->dev_link;
if (uname(&utsname) == -1 ||
sscanf(utsname.release, "%d.%d.%d",
&ver[0], &ver[1], &ver[2]) != 3 ||
KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
- return mlx5_link_update_unlocked_gset(dev, wait_to_complete);
- return mlx5_link_update_unlocked_gs(dev, wait_to_complete);
+ ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
+ else
+ ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
+	/* If the LSC interrupt is disabled, always be ready for traffic. */
+ if (!dev->data->dev_conf.intr_conf.lsc) {
+ priv_link_start(priv);
+ return ret;
+ }
+	/* Re-select burst callbacks only if the link status has changed. */
+ if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
+ if (dev->data->dev_link.link_status == ETH_LINK_UP)
+ priv_link_start(priv);
+ else
+ priv_link_stop(priv);
+ }
+ return ret;
+}
+
+/**
+ * Query the link status until it changes to the desired state.
+ * The number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param status
+ * Link desired status.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+priv_force_link_status_change(struct priv *priv, int status)
+{
+ int try = 0;
+
+ while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) {
+ priv_link_update(priv, 0);
+ if (priv->dev->data->dev_link.link_status == status)
+ return 0;
+ try++;
+ sleep(1);
+ }
+ return -EAGAIN;
+}
+
+/**
+ * DPDK callback to retrieve physical link information.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param wait_to_complete
+ * Wait for request completion (ignored).
+ */
+int
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ priv_lock(priv);
+ ret = priv_link_update(priv, wait_to_complete);
+ priv_unlock(priv);
+ return ret;
}
/**
@@ -952,9 +1004,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
uint16_t kern_mtu;
int ret = 0;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
-
priv_lock(priv);
ret = priv_get_mtu(priv, &kern_mtu);
if (ret)
@@ -1002,9 +1051,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
};
int ret;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
-
ifr.ifr_data = (void *)&ethpause;
priv_lock(priv);
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
@@ -1053,9 +1099,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
};
int ret;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
-
ifr.ifr_data = (void *)&ethpause;
ethpause.autoneg = fc_conf->autoneg;
if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
@@ -1150,7 +1193,7 @@ priv_link_status_update(struct priv *priv)
{
struct rte_eth_link *link = &priv->dev->data->dev_link;
- mlx5_link_update(priv->dev, 0);
+ priv_link_update(priv, 0);
if (((link->link_speed == 0) && link->link_status) ||
((link->link_speed != 0) && !link->link_status)) {
/*
@@ -1191,7 +1234,7 @@ priv_dev_status_handler(struct priv *priv)
/* Read all message and acknowledge them. */
for (;;) {
- if (ibv_get_async_event(priv->ctx, &event))
+ if (mlx5_glue->get_async_event(priv->ctx, &event))
break;
if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
event.event_type == IBV_EVENT_PORT_ERR) &&
@@ -1203,7 +1246,7 @@ priv_dev_status_handler(struct priv *priv)
else
DEBUG("event type %d on port %d not handled",
event.event_type, event.element.port_num);
- ibv_ack_async_event(&event);
+ mlx5_glue->ack_async_event(&event);
}
if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
if (priv_link_status_update(priv))
@@ -1224,14 +1267,17 @@ mlx5_dev_link_status_handler(void *arg)
struct priv *priv = dev->data->dev_private;
int ret;
- priv_lock(priv);
- assert(priv->pending_alarm == 1);
+ while (!priv_trylock(priv)) {
+ /* Alarm is being canceled. */
+ if (priv->pending_alarm == 0)
+ return;
+ rte_pause();
+ }
priv->pending_alarm = 0;
ret = priv_link_status_update(priv);
priv_unlock(priv);
if (!ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
- NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
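The trylock loop above pairs with the change to priv_dev_interrupt_handler_uninstall() further down: the canceler clears pending_alarm before calling rte_eal_alarm_cancel(), so a handler spinning on the lock can notice the cancellation and return instead of deadlocking against the canceling thread. A minimal sketch of that handshake using POSIX primitives (all names are hypothetical):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int pending = ATOMIC_VAR_INIT(1);

/* Alarm side: spin on trylock, but give up once the alarm is canceled. */
void alarm_handler(void)
{
	while (pthread_mutex_trylock(&lock) != 0) {
		if (atomic_load(&pending) == 0)
			return; /* canceled while waiting for the lock */
	}
	atomic_store(&pending, 0);
	/* ... update the link status here ... */
	pthread_mutex_unlock(&lock);
}

/* Canceler side: clear the flag under the lock before canceling, so a
 * spinning handler observes it and bails out. */
void cancel_alarm(void)
{
	pthread_mutex_lock(&lock);
	atomic_store(&pending, 0);
	pthread_mutex_unlock(&lock);
	/* The PMD calls rte_eal_alarm_cancel() at this point. */
}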
/**
@@ -1253,11 +1299,9 @@ mlx5_dev_interrupt_handler(void *cb_arg)
events = priv_dev_status_handler(priv);
priv_unlock(priv);
if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
- NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL,
- NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
}
/**
@@ -1295,9 +1339,10 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
if (priv->primary_socket)
rte_intr_callback_unregister(&priv->intr_handle_socket,
mlx5_dev_handler_socket, dev);
- if (priv->pending_alarm)
+ if (priv->pending_alarm) {
+ priv->pending_alarm = 0;
rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
- priv->pending_alarm = 0;
+ }
priv->intr_handle.fd = 0;
priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
priv->intr_handle_socket.fd = 0;
@@ -1317,7 +1362,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
{
int rc, flags;
- assert(!mlx5_is_secondary());
assert(priv->ctx->async_fd > 0);
flags = fcntl(priv->ctx->async_fd, F_GETFL);
rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
@@ -1348,8 +1392,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
*
* @param priv
* Pointer to private data structure.
- * @param dev
- * Pointer to rte_eth_dev structure.
* @param up
* Nonzero for link up, otherwise link down.
*
@@ -1357,24 +1399,9 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
* 0 on success, errno value on failure.
*/
static int
-priv_dev_set_link(struct priv *priv, struct rte_eth_dev *dev, int up)
+priv_dev_set_link(struct priv *priv, int up)
{
- int err;
-
- if (up) {
- err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
- if (err)
- return err;
- priv_dev_select_tx_function(priv, dev);
- priv_dev_select_rx_function(priv, dev);
- } else {
- err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
- if (err)
- return err;
- dev->rx_pkt_burst = removed_rx_burst;
- dev->tx_pkt_burst = removed_tx_burst;
- }
- return 0;
+ return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP);
}
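The collapsed body depends on the (keep, flags) contract of priv_set_flags(): bits set in keep are preserved from the current netdevice flags and the rest are taken from flags, so passing ~IFF_UP as keep toggles only IFF_UP. A self-contained restatement of that arithmetic (the contract is an assumption inferred from how the call is used in this file):

#include <net/if.h>

/* new = (old & keep) | (flags & ~keep); only the ~keep bits change. */
static unsigned int
apply_link_flags(unsigned int old, int up)
{
	const unsigned int keep = ~IFF_UP;
	unsigned int flags = up ? IFF_UP : ~IFF_UP;

	return (old & keep) | (flags & ~keep);
}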
/**
@@ -1393,7 +1420,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
int err;
priv_lock(priv);
- err = priv_dev_set_link(priv, dev, 0);
+ err = priv_dev_set_link(priv, 0);
priv_unlock(priv);
return err;
}
@@ -1414,7 +1441,7 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
int err;
priv_lock(priv);
- err = priv_dev_set_link(priv, dev, 1);
+ err = priv_dev_set_link(priv, 1);
priv_unlock(priv);
return err;
}
@@ -1426,32 +1453,44 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
* Pointer to private data structure.
* @param dev
* Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * Pointer to selected Tx burst function.
*/
-void
-priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
+eth_tx_burst_t
+priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
{
+ eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
+ struct mlx5_dev_config *config = &priv->config;
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO));
+ int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
+
assert(priv != NULL);
- assert(dev != NULL);
- dev->tx_pkt_burst = mlx5_tx_burst;
/* Select appropriate TX function. */
- if (priv->mps == MLX5_MPW_ENHANCED) {
- if (priv_check_vec_tx_support(priv) > 0) {
- if (priv_check_raw_vec_tx_support(priv) > 0)
- dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
+ if (vlan_insert || tso)
+ return tx_pkt_burst;
+ if (config->mps == MLX5_MPW_ENHANCED) {
+ if (priv_check_vec_tx_support(priv, dev) > 0) {
+ if (priv_check_raw_vec_tx_support(priv, dev) > 0)
+ tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
- dev->tx_pkt_burst = mlx5_tx_burst_vec;
+ tx_pkt_burst = mlx5_tx_burst_vec;
DEBUG("selected Enhanced MPW TX vectorized function");
} else {
- dev->tx_pkt_burst = mlx5_tx_burst_empw;
+ tx_pkt_burst = mlx5_tx_burst_empw;
DEBUG("selected Enhanced MPW TX function");
}
- } else if (priv->mps && priv->txq_inline) {
- dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
+ } else if (config->mps && (config->txq_inline > 0)) {
+ tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
- } else if (priv->mps) {
- dev->tx_pkt_burst = mlx5_tx_burst_mpw;
+ } else if (config->mps) {
+ tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
+ return tx_pkt_burst;
}
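The precedence implemented above is: any TSO or VLAN-insertion offload forces the scalar default; otherwise Enhanced MPW prefers the vectorized variants when the capability checks pass, and plain MPW (inline or not) covers the remaining configurations. A condensed restatement of the same decision tree, as a sketch whose predicates stand in for the offload and config tests and whose burst symbols are those declared in mlx5_rxtx.h:

static eth_tx_burst_t
pick_tx_burst(int tso_or_vlan, int empw, int vec_ok, int raw_vec_ok,
	      int mps, int txq_inline)
{
	if (tso_or_vlan)
		return mlx5_tx_burst;
	if (empw && vec_ok)
		return raw_vec_ok ? mlx5_tx_burst_raw_vec : mlx5_tx_burst_vec;
	if (empw)
		return mlx5_tx_burst_empw;
	if (mps && txq_inline > 0)
		return mlx5_tx_burst_mpw_inline;
	if (mps)
		return mlx5_tx_burst_mpw;
	return mlx5_tx_burst;
}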
/**
@@ -1461,16 +1500,39 @@ priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
* Pointer to private data structure.
* @param dev
* Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * Pointer to selected Rx burst function.
*/
-void
-priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev)
+eth_rx_burst_t
+priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
{
+ eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
+
assert(priv != NULL);
- assert(dev != NULL);
if (priv_check_vec_rx_support(priv) > 0) {
- dev->rx_pkt_burst = mlx5_rx_burst_vec;
+ rx_pkt_burst = mlx5_rx_burst_vec;
DEBUG("selected RX vectorized function");
- } else {
- dev->rx_pkt_burst = mlx5_rx_burst;
}
+ return rx_pkt_burst;
+}
+
+/**
+ * Check if mlx5 device was removed.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 1 when device is removed, otherwise 0.
+ */
+int
+mlx5_is_removed(struct rte_eth_dev *dev)
+{
+ struct ibv_device_attr device_attr;
+ struct priv *priv = dev->data->dev_private;
+
+ if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
+ return 1;
+ return 0;
}
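An errno value of EIO from ibv_query_device() is how rdma-core reports that the underlying device disappeared, which is what the helper keys on. It is presumably registered as the ethdev .is_removed operation introduced in this release, so applications would reach it through the generic API; a usage sketch with an assumed configured port_id:

#include <rte_ethdev.h>

/* Decide whether a failing control operation is worth retrying. */
static int
worth_retrying(uint16_t port_id)
{
	return !rte_eth_dev_is_removed(port_id);
}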
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f32dfdd3..26002c4b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
*/
#include <sys/queue.h>
@@ -44,13 +16,16 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
+#include <rte_ip.h>
#include "mlx5.h"
+#include "mlx5_defs.h"
#include "mlx5_prm.h"
+#include "mlx5_glue.h"
/* Define minimal priority for control plane flows. */
#define MLX5_CTRL_FLOW_PRIORITY 4
@@ -60,22 +35,9 @@
#define MLX5_IPV6 6
#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-struct ibv_counter_set_init_attr {
- int dummy;
-};
struct ibv_flow_spec_counter_action {
int dummy;
};
-struct ibv_counter_set {
- int dummy;
-};
-
-static inline int
-ibv_destroy_counter_set(struct ibv_counter_set *cs)
-{
- (void)cs;
- return -ENOTSUP;
-}
#endif
/* Dev ops structure defined in mlx5.c */
@@ -250,11 +212,8 @@ struct rte_flow {
uint8_t rss_key[40]; /**< copy of the RSS key. */
struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
struct mlx5_flow_counter_stats counter_stats;/**<The counter stats. */
- union {
- struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
- /**< Flow with Rx queue. */
- struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
- };
+ struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
+ /**< Flow with Rx queue. */
};
/** Static initializer for items. */
@@ -444,20 +403,12 @@ struct mlx5_flow_parse {
uint8_t rss_key[40]; /**< copy of the RSS key. */
enum hash_rxq_type layer; /**< Last pattern layer detected. */
struct ibv_counter_set *cs; /**< Holds the counter set for the rule */
- union {
- struct {
- struct ibv_flow_attr *ibv_attr;
- /**< Pointer to Verbs attributes. */
- unsigned int offset;
- /**< Current position or total size of the attribute. */
- } queue[RTE_DIM(hash_rxq_init)];
- struct {
- struct ibv_flow_attr *ibv_attr;
- /**< Pointer to Verbs attributes. */
- unsigned int offset;
- /**< Current position or total size of the attribute. */
- } drop_q;
- };
+ struct {
+ struct ibv_flow_attr *ibv_attr;
+ /**< Pointer to Verbs attributes. */
+ unsigned int offset;
+ /**< Current position or total size of the attribute. */
+ } queue[RTE_DIM(hash_rxq_init)];
};
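With both unions removed, a drop rule no longer has a private drop_q slot: its Verbs attribute is built in queue[HASH_RXQ_ETH] and the drop action appended there, as the hunks below show, so the parser walk becomes uniform. Conceptually (sketch over the structures above):

static struct ibv_flow_attr *
parser_attr(struct mlx5_flow_parse *parser, enum hash_rxq_type type)
{
	/* Drop rules only ever populate the HASH_RXQ_ETH slot. */
	return parser->queue[parser->drop ? HASH_RXQ_ETH : type].ibv_attr;
}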
static const struct rte_flow_ops mlx5_flow_ops = {
@@ -537,7 +488,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
}
if (item->mask) {
unsigned int i;
- const uint8_t *spec = item->mask;
+ const uint8_t *spec = item->spec;
for (i = 0; i < size; ++i)
if ((spec[i] | mask[i]) != mask[i])
@@ -561,7 +512,8 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
}
/**
- * Copy the RSS configuration from the user ones.
+ * Copy the RSS configuration from the user one; if rss_conf is null,
+ * use the driver's default configuration.
*
* @param priv
* Pointer to private structure.
@@ -578,15 +530,25 @@ priv_flow_convert_rss_conf(struct priv *priv,
struct mlx5_flow_parse *parser,
const struct rte_eth_rss_conf *rss_conf)
{
- const struct rte_eth_rss_conf *rss =
- rss_conf ? rss_conf : &priv->rss_conf;
-
- if (rss->rss_key_len > 40)
- return EINVAL;
- parser->rss_conf.rss_key_len = rss->rss_key_len;
- parser->rss_conf.rss_hf = rss->rss_hf;
- memcpy(parser->rss_key, rss->rss_key, rss->rss_key_len);
- parser->rss_conf.rss_key = parser->rss_key;
+ /*
+ * This function is also called at the beginning of
+ * priv_flow_convert_actions() to initialize the parser with the
+ * device default RSS configuration.
+ */
+ (void)priv;
+ if (rss_conf) {
+ if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
+ return EINVAL;
+ if (rss_conf->rss_key_len != 40)
+ return EINVAL;
+ if (rss_conf->rss_key_len && rss_conf->rss_key) {
+ parser->rss_conf.rss_key_len = rss_conf->rss_key_len;
+ memcpy(parser->rss_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ parser->rss_conf.rss_key = parser->rss_key;
+ }
+ parser->rss_conf.rss_hf = rss_conf->rss_hf;
+ }
return 0;
}
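The stricter path rejects any hash field flagged in MLX5_RSS_HF_MASK (judging by its use here, the set of fields the device cannot hash on) and any key whose length is not exactly 40 bytes, the Toeplitz key size this PMD works with. An illustrative configuration that passes both checks:

#include <rte_ethdev.h>

/* 40-byte key and supported hash fields only (illustrative values). */
static uint8_t rss_key[40] = { 0x6d, 0x5a, 0x56, 0xda };
static struct rte_eth_rss_conf rss_conf = {
	.rss_key = rss_key,
	.rss_key_len = sizeof(rss_key),
	.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
};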
@@ -781,7 +743,7 @@ priv_flow_convert_actions(struct priv *priv,
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
parser->mark = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
- priv->counter_set_supported) {
+ priv->config.flow_counter_en) {
parser->count = 1;
} else {
goto exit_action_not_supported;
@@ -827,12 +789,8 @@ priv_flow_convert_items_validate(struct priv *priv,
(void)priv;
/* Initialise the offsets to start after verbs attribute. */
- if (parser->drop) {
- parser->drop_q.offset = sizeof(struct ibv_flow_attr);
- } else {
- for (i = 0; i != hash_rxq_init_n; ++i)
- parser->queue[i].offset = sizeof(struct ibv_flow_attr);
- }
+ for (i = 0; i != hash_rxq_init_n; ++i)
+ parser->queue[i].offset = sizeof(struct ibv_flow_attr);
for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
const struct mlx5_flow_items *token = NULL;
unsigned int n;
@@ -869,14 +827,16 @@ priv_flow_convert_items_validate(struct priv *priv,
parser->inner = IBV_FLOW_SPEC_INNER;
}
if (parser->drop) {
- parser->drop_q.offset += cur_item->dst_sz;
- } else if (parser->queues_n == 1) {
parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;
} else {
for (n = 0; n != hash_rxq_init_n; ++n)
parser->queue[n].offset += cur_item->dst_sz;
}
}
+ if (parser->drop) {
+ parser->queue[HASH_RXQ_ETH].offset +=
+ sizeof(struct ibv_flow_spec_action_drop);
+ }
if (parser->mark) {
for (i = 0; i != hash_rxq_init_n; ++i)
parser->queue[i].offset +=
@@ -885,12 +845,8 @@ priv_flow_convert_items_validate(struct priv *priv,
if (parser->count) {
unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
- if (parser->drop) {
- parser->drop_q.offset += size;
- } else {
- for (i = 0; i != hash_rxq_init_n; ++i)
- parser->queue[i].offset += size;
- }
+ for (i = 0; i != hash_rxq_init_n; ++i)
+ parser->queue[i].offset += size;
}
return 0;
exit_item_not_supported:
@@ -1103,14 +1059,6 @@ priv_flow_convert(struct priv *priv,
* Allocate the memory space to store verbs specifications.
*/
if (parser->drop) {
- parser->drop_q.ibv_attr =
- priv_flow_convert_allocate(priv, attr->priority,
- parser->drop_q.offset,
- error);
- if (!parser->drop_q.ibv_attr)
- return ENOMEM;
- parser->drop_q.offset = sizeof(struct ibv_flow_attr);
- } else if (parser->queues_n == 1) {
unsigned int priority =
attr->priority +
hash_rxq_init[HASH_RXQ_ETH].flow_priority;
@@ -1172,22 +1120,9 @@ priv_flow_convert(struct priv *priv,
* Last step. Complete missing specification to reach the RSS
* configuration.
*/
- if (parser->drop) {
- /*
- * Drop queue priority needs to be adjusted to
- * their most specific layer priority.
- */
- parser->drop_q.ibv_attr->priority =
- attr->priority +
- hash_rxq_init[parser->layer].flow_priority;
- } else if (parser->queues_n > 1) {
+ if (!parser->drop) {
priv_flow_convert_finalise(priv, parser);
} else {
- /*
- * Action queue have their priority overridden with
- * Ethernet priority, this priority needs to be adjusted to
- * their most specific layer priority.
- */
parser->queue[HASH_RXQ_ETH].ibv_attr->priority =
attr->priority +
hash_rxq_init[parser->layer].flow_priority;
@@ -1195,10 +1130,6 @@ priv_flow_convert(struct priv *priv,
exit_free:
/* Only verification is expected, all resources should be released. */
if (!parser->create) {
- if (parser->drop) {
- rte_free(parser->drop_q.ibv_attr);
- parser->drop_q.ibv_attr = NULL;
- }
for (i = 0; i != hash_rxq_init_n; ++i) {
if (parser->queue[i].ibv_attr) {
rte_free(parser->queue[i].ibv_attr);
@@ -1240,14 +1171,6 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
unsigned int i;
void *dst;
- if (parser->drop) {
- dst = (void *)((uintptr_t)parser->drop_q.ibv_attr +
- parser->drop_q.offset);
- memcpy(dst, src, size);
- ++parser->drop_q.ibv_attr->num_of_specs;
- parser->drop_q.offset += size;
- return;
- }
for (i = 0; i != hash_rxq_init_n; ++i) {
if (!parser->queue[i].ibv_attr)
continue;
@@ -1340,14 +1263,6 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,
if (!mask)
mask = default_mask;
- if (parser->drop) {
- eth = (void *)((uintptr_t)parser->drop_q.ibv_attr +
- parser->drop_q.offset - eth_size);
- eth->val.vlan_tag = spec->tci;
- eth->mask.vlan_tag = mask->tci;
- eth->val.vlan_tag &= eth->mask.vlan_tag;
- return 0;
- }
for (i = 0; i != hash_rxq_init_n; ++i) {
if (!parser->queue[i].ibv_attr)
continue;
@@ -1443,6 +1358,8 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
parser->layer = HASH_RXQ_IPV6;
if (spec) {
unsigned int i;
+ uint32_t vtc_flow_val;
+ uint32_t vtc_flow_mask;
if (!mask)
mask = default_mask;
@@ -1454,7 +1371,20 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
RTE_DIM(ipv6.mask.src_ip));
memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
RTE_DIM(ipv6.mask.dst_ip));
- ipv6.mask.flow_label = mask->hdr.vtc_flow;
+ vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
+ vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
+ ipv6.val.flow_label =
+ rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
+ ipv6.val.next_hdr = spec->hdr.proto;
+ ipv6.val.hop_limit = spec->hdr.hop_limits;
+ ipv6.mask.flow_label =
+ rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
ipv6.mask.next_hdr = mask->hdr.proto;
ipv6.mask.hop_limit = mask->hdr.hop_limits;
/* Remove unwanted bits from values. */
@@ -1463,6 +1393,7 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
}
ipv6.val.flow_label &= ipv6.mask.flow_label;
+ ipv6.val.traffic_class &= ipv6.mask.traffic_class;
ipv6.val.next_hdr &= ipv6.mask.next_hdr;
ipv6.val.hop_limit &= ipv6.mask.hop_limit;
}
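The added lines unpack vtc_flow, the first 32-bit word of the IPv6 header: version in the top 4 bits, traffic class in the next 8, flow label in the low 20. The IPV6_HDR_* masks and shifts come from rte_ip.h, which is why that include was added at the top of this file. A worked extraction of both fields:

#include <rte_byteorder.h>
#include <rte_ip.h>

/* vtc_flow layout: version(4) | traffic class(8) | flow label(20). */
static void
split_vtc_flow(rte_be32_t vtc_flow, uint32_t *flow_label, uint32_t *tc)
{
	uint32_t v = rte_be_to_cpu_32(vtc_flow);

	*flow_label = (v & IPV6_HDR_FL_MASK) >> IPV6_HDR_FL_SHIFT;
	*tc = (v & IPV6_HDR_TC_MASK) >> IPV6_HDR_TC_SHIFT;
}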
@@ -1664,7 +1595,7 @@ mlx5_flow_create_count(struct priv *priv __rte_unused,
};
init_attr.counter_set_id = 0;
- parser->cs = ibv_create_counter_set(priv->ctx, &init_attr);
+ parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr);
if (!parser->cs)
return EINVAL;
counter.counter_set_handle = parser->cs->handle;
@@ -1701,23 +1632,25 @@ priv_flow_create_action_queue_drop(struct priv *priv,
assert(priv->pd);
assert(priv->ctx);
flow->drop = 1;
- drop = (void *)((uintptr_t)parser->drop_q.ibv_attr +
- parser->drop_q.offset);
+ drop = (void *)((uintptr_t)parser->queue[HASH_RXQ_ETH].ibv_attr +
+ parser->queue[HASH_RXQ_ETH].offset);
*drop = (struct ibv_flow_spec_action_drop){
.type = IBV_FLOW_SPEC_ACTION_DROP,
.size = size,
};
- ++parser->drop_q.ibv_attr->num_of_specs;
- parser->drop_q.offset += size;
- flow->drxq.ibv_attr = parser->drop_q.ibv_attr;
+ ++parser->queue[HASH_RXQ_ETH].ibv_attr->num_of_specs;
+ parser->queue[HASH_RXQ_ETH].offset += size;
+ flow->frxq[HASH_RXQ_ETH].ibv_attr =
+ parser->queue[HASH_RXQ_ETH].ibv_attr;
if (parser->count)
flow->cs = parser->cs;
if (!priv->dev->data->dev_started)
return 0;
- parser->drop_q.ibv_attr = NULL;
- flow->drxq.ibv_flow = ibv_create_flow(priv->flow_drop_queue->qp,
- flow->drxq.ibv_attr);
- if (!flow->drxq.ibv_flow) {
+ parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
+ flow->frxq[HASH_RXQ_ETH].ibv_flow =
+ mlx5_glue->create_flow(priv->flow_drop_queue->qp,
+ flow->frxq[HASH_RXQ_ETH].ibv_attr);
+ if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
err = ENOMEM;
@@ -1726,16 +1659,17 @@ priv_flow_create_action_queue_drop(struct priv *priv,
return 0;
error:
assert(flow);
- if (flow->drxq.ibv_flow) {
- claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
- flow->drxq.ibv_flow = NULL;
+ if (flow->frxq[HASH_RXQ_ETH].ibv_flow) {
+ claim_zero(mlx5_glue->destroy_flow
+ (flow->frxq[HASH_RXQ_ETH].ibv_flow));
+ flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
}
- if (flow->drxq.ibv_attr) {
- rte_free(flow->drxq.ibv_attr);
- flow->drxq.ibv_attr = NULL;
+ if (flow->frxq[HASH_RXQ_ETH].ibv_attr) {
+ rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);
+ flow->frxq[HASH_RXQ_ETH].ibv_attr = NULL;
}
if (flow->cs) {
- claim_zero(ibv_destroy_counter_set(flow->cs));
+ claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
flow->cs = NULL;
parser->cs = NULL;
}
@@ -1839,8 +1773,8 @@ priv_flow_create_action_queue(struct priv *priv,
if (!flow->frxq[i].hrxq)
continue;
flow->frxq[i].ibv_flow =
- ibv_create_flow(flow->frxq[i].hrxq->qp,
- flow->frxq[i].ibv_attr);
+ mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
if (!flow->frxq[i].ibv_flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1866,7 +1800,7 @@ error:
if (flow->frxq[i].ibv_flow) {
struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow;
- claim_zero(ibv_destroy_flow(ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow(ibv_flow));
}
if (flow->frxq[i].hrxq)
mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
@@ -1874,7 +1808,7 @@ error:
rte_free(flow->frxq[i].ibv_attr);
}
if (flow->cs) {
- claim_zero(ibv_destroy_counter_set(flow->cs));
+ claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
flow->cs = NULL;
parser->cs = NULL;
}
@@ -1947,13 +1881,10 @@ priv_flow_create(struct priv *priv,
DEBUG("Flow created %p", (void *)flow);
return flow;
exit:
- if (parser.drop) {
- rte_free(parser.drop_q.ibv_attr);
- } else {
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (parser.queue[i].ibv_attr)
- rte_free(parser.queue[i].ibv_attr);
- }
+ ERROR("flow creation error: %s", error->message);
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (parser.queue[i].ibv_attr)
+ rte_free(parser.queue[i].ibv_attr);
}
rte_free(flow);
return NULL;
@@ -2055,15 +1986,17 @@ priv_flow_destroy(struct priv *priv,
}
free:
if (flow->drop) {
- if (flow->drxq.ibv_flow)
- claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
- rte_free(flow->drxq.ibv_attr);
+ if (flow->frxq[HASH_RXQ_ETH].ibv_flow)
+ claim_zero(mlx5_glue->destroy_flow
+ (flow->frxq[HASH_RXQ_ETH].ibv_flow));
+ rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);
} else {
for (i = 0; i != hash_rxq_init_n; ++i) {
struct mlx5_flow *frxq = &flow->frxq[i];
if (frxq->ibv_flow)
- claim_zero(ibv_destroy_flow(frxq->ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow
+ (frxq->ibv_flow));
if (frxq->hrxq)
mlx5_priv_hrxq_release(priv, frxq->hrxq);
if (frxq->ibv_attr)
@@ -2071,7 +2004,7 @@ free:
}
}
if (flow->cs) {
- claim_zero(ibv_destroy_counter_set(flow->cs));
+ claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
flow->cs = NULL;
}
TAILQ_REMOVE(list, flow, next);
@@ -2119,35 +2052,38 @@ priv_flow_create_drop_queue(struct priv *priv)
WARN("cannot allocate memory for drop queue");
goto error;
}
- fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
+ fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!fdq->cq) {
WARN("cannot allocate CQ for drop queue");
goto error;
}
- fdq->wq = ibv_create_wq(priv->ctx,
- &(struct ibv_wq_init_attr){
+ fdq->wq = mlx5_glue->create_wq
+ (priv->ctx,
+ &(struct ibv_wq_init_attr){
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
.max_sge = 1,
.pd = priv->pd,
.cq = fdq->cq,
- });
+ });
if (!fdq->wq) {
WARN("cannot allocate WQ for drop queue");
goto error;
}
- fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
- &(struct ibv_rwq_ind_table_init_attr){
+ fdq->ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = 0,
.ind_tbl = &fdq->wq,
.comp_mask = 0,
- });
+ });
if (!fdq->ind_table) {
WARN("cannot allocate indirection table for drop queue");
goto error;
}
- fdq->qp = ibv_create_qp_ex(priv->ctx,
- &(struct ibv_qp_init_attr_ex){
+ fdq->qp = mlx5_glue->create_qp_ex
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
IBV_QP_INIT_ATTR_PD |
@@ -2162,7 +2098,7 @@ priv_flow_create_drop_queue(struct priv *priv)
},
.rwq_ind_tbl = fdq->ind_table,
.pd = priv->pd
- });
+ });
if (!fdq->qp) {
WARN("cannot allocate QP for drop queue");
goto error;
@@ -2171,13 +2107,13 @@ priv_flow_create_drop_queue(struct priv *priv)
return 0;
error:
if (fdq->qp)
- claim_zero(ibv_destroy_qp(fdq->qp));
+ claim_zero(mlx5_glue->destroy_qp(fdq->qp));
if (fdq->ind_table)
- claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
if (fdq->wq)
- claim_zero(ibv_destroy_wq(fdq->wq));
+ claim_zero(mlx5_glue->destroy_wq(fdq->wq));
if (fdq->cq)
- claim_zero(ibv_destroy_cq(fdq->cq));
+ claim_zero(mlx5_glue->destroy_cq(fdq->cq));
if (fdq)
rte_free(fdq);
priv->flow_drop_queue = NULL;
@@ -2198,13 +2134,13 @@ priv_flow_delete_drop_queue(struct priv *priv)
if (!fdq)
return;
if (fdq->qp)
- claim_zero(ibv_destroy_qp(fdq->qp));
+ claim_zero(mlx5_glue->destroy_qp(fdq->qp));
if (fdq->ind_table)
- claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
if (fdq->wq)
- claim_zero(ibv_destroy_wq(fdq->wq));
+ claim_zero(mlx5_glue->destroy_wq(fdq->wq));
if (fdq->cq)
- claim_zero(ibv_destroy_cq(fdq->cq));
+ claim_zero(mlx5_glue->destroy_cq(fdq->cq));
rte_free(fdq);
priv->flow_drop_queue = NULL;
}
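Both the creation error path and this destructor release the Verbs objects in strict reverse order of creation, since each object holds a reference on the one created before it. The dependency chain, spelled out as a sketch:

/* Creation order for the drop queue (each step needs the previous):
 *   CQ -> WQ(CQ) -> indirection table(WQ) -> QP(indirection table)
 * so teardown must run QP, indirection table, WQ, CQ. */
enum drop_queue_step {
	DROP_Q_CQ,
	DROP_Q_WQ,
	DROP_Q_IND_TABLE,
	DROP_Q_QP,
};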
@@ -2224,23 +2160,34 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
unsigned int i;
+ struct mlx5_ind_table_ibv *ind_tbl = NULL;
if (flow->drop) {
- if (!flow->drxq.ibv_flow)
+ if (!flow->frxq[HASH_RXQ_ETH].ibv_flow)
continue;
- claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
- flow->drxq.ibv_flow = NULL;
+ claim_zero(mlx5_glue->destroy_flow
+ (flow->frxq[HASH_RXQ_ETH].ibv_flow));
+ flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
+ DEBUG("Flow %p removed", (void *)flow);
/* Next flow. */
continue;
}
+ /* Verify the flow has not already been cleaned. */
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].ibv_flow)
+ continue;
+ /*
+ * Indirection table may be necessary to remove the
+ * flags in the Rx queues.
+ * This helps to speed up the process by avoiding
+ * another loop.
+ */
+ ind_tbl = flow->frxq[i].hrxq->ind_table;
+ break;
+ }
+ if (i == hash_rxq_init_n)
+ return;
if (flow->mark) {
- struct mlx5_ind_table_ibv *ind_tbl = NULL;
-
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!flow->frxq[i].hrxq)
- continue;
- ind_tbl = flow->frxq[i].hrxq->ind_table;
- }
assert(ind_tbl);
for (i = 0; i != ind_tbl->queues_n; ++i)
(*priv->rxqs)[ind_tbl->queues[i]]->mark = 0;
@@ -2248,7 +2195,8 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
for (i = 0; i != hash_rxq_init_n; ++i) {
if (!flow->frxq[i].ibv_flow)
continue;
- claim_zero(ibv_destroy_flow(flow->frxq[i].ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow
+ (flow->frxq[i].ibv_flow));
flow->frxq[i].ibv_flow = NULL;
mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
flow->frxq[i].hrxq = NULL;
@@ -2277,10 +2225,11 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list)
unsigned int i;
if (flow->drop) {
- flow->drxq.ibv_flow =
- ibv_create_flow(priv->flow_drop_queue->qp,
- flow->drxq.ibv_attr);
- if (!flow->drxq.ibv_flow) {
+ flow->frxq[HASH_RXQ_ETH].ibv_flow =
+ mlx5_glue->create_flow
+ (priv->flow_drop_queue->qp,
+ flow->frxq[HASH_RXQ_ETH].ibv_attr);
+ if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
DEBUG("Flow %p cannot be applied",
(void *)flow);
rte_errno = EINVAL;
@@ -2315,8 +2264,8 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list)
}
flow_create:
flow->frxq[i].ibv_flow =
- ibv_create_flow(flow->frxq[i].hrxq->qp,
- flow->frxq[i].ibv_attr);
+ mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
if (!flow->frxq[i].ibv_flow) {
DEBUG("Flow %p cannot be applied",
(void *)flow);
@@ -2523,7 +2472,7 @@ priv_flow_query_count(struct ibv_counter_set *cs,
.out = counters,
.outlen = 2 * sizeof(uint64_t),
};
- int res = ibv_query_counter_set(&query_cs_attr, &query_out);
+ int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);
if (res) {
rte_flow_error_set(error, -res,
@@ -2671,10 +2620,12 @@ priv_fdir_filter_convert(struct priv *priv,
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_UDP,
.spec = &attributes->l4,
+ .mask = &attributes->l4,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
@@ -2692,10 +2643,12 @@ priv_fdir_filter_convert(struct priv *priv,
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_TCP,
.spec = &attributes->l4,
+ .mask = &attributes->l4,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
@@ -2709,6 +2662,7 @@ priv_fdir_filter_convert(struct priv *priv,
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
@@ -2729,10 +2683,12 @@ priv_fdir_filter_convert(struct priv *priv,
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_UDP,
.spec = &attributes->l4,
+ .mask = &attributes->l4,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
@@ -2753,10 +2709,12 @@ priv_fdir_filter_convert(struct priv *priv,
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_TCP,
.spec = &attributes->l4,
+ .mask = &attributes->l4,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
@@ -2773,6 +2731,7 @@ priv_fdir_filter_convert(struct priv *priv,
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
break;
default:
@@ -2875,13 +2834,13 @@ priv_fdir_filter_delete(struct priv *priv,
if (parser.drop) {
struct ibv_flow_spec_action_drop *drop;
- drop = (void *)((uintptr_t)parser.drop_q.ibv_attr +
- parser.drop_q.offset);
+ drop = (void *)((uintptr_t)parser.queue[HASH_RXQ_ETH].ibv_attr +
+ parser.queue[HASH_RXQ_ETH].offset);
*drop = (struct ibv_flow_spec_action_drop){
.type = IBV_FLOW_SPEC_ACTION_DROP,
.size = sizeof(struct ibv_flow_spec_action_drop),
};
- parser.drop_q.ibv_attr->num_of_specs++;
+ parser.queue[HASH_RXQ_ETH].ibv_attr->num_of_specs++;
}
TAILQ_FOREACH(flow, &priv->flows, next) {
struct ibv_flow_attr *attr;
@@ -2892,14 +2851,8 @@ priv_fdir_filter_delete(struct priv *priv,
void *flow_spec;
unsigned int specs_n;
- if (parser.drop)
- attr = parser.drop_q.ibv_attr;
- else
- attr = parser.queue[HASH_RXQ_ETH].ibv_attr;
- if (flow->drop)
- flow_attr = flow->drxq.ibv_attr;
- else
- flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr;
+ attr = parser.queue[HASH_RXQ_ETH].ibv_attr;
+ flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr;
/* Compare first the attributes. */
if (memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr)))
continue;
@@ -2929,13 +2882,9 @@ wrong_flow:
if (flow)
priv_flow_destroy(priv, &priv->flows, flow);
exit:
- if (parser.drop) {
- rte_free(parser.drop_q.ibv_attr);
- } else {
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (parser.queue[i].ibv_attr)
- rte_free(parser.queue[i].ibv_attr);
- }
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (parser.queue[i].ibv_attr)
+ rte_free(parser.queue[i].ibv_attr);
}
return -ret;
}
diff --git a/drivers/net/mlx5/mlx5_glue.c b/drivers/net/mlx5/mlx5_glue.c
new file mode 100644
index 00000000..1c4396ad
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_glue.c
@@ -0,0 +1,354 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd.
+ */
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include "mlx5_autoconf.h"
+#include "mlx5_glue.h"
+
+static int
+mlx5_glue_fork_init(void)
+{
+ return ibv_fork_init();
+}
+
+static struct ibv_pd *
+mlx5_glue_alloc_pd(struct ibv_context *context)
+{
+ return ibv_alloc_pd(context);
+}
+
+static int
+mlx5_glue_dealloc_pd(struct ibv_pd *pd)
+{
+ return ibv_dealloc_pd(pd);
+}
+
+static struct ibv_device **
+mlx5_glue_get_device_list(int *num_devices)
+{
+ return ibv_get_device_list(num_devices);
+}
+
+static void
+mlx5_glue_free_device_list(struct ibv_device **list)
+{
+ ibv_free_device_list(list);
+}
+
+static struct ibv_context *
+mlx5_glue_open_device(struct ibv_device *device)
+{
+ return ibv_open_device(device);
+}
+
+static int
+mlx5_glue_close_device(struct ibv_context *context)
+{
+ return ibv_close_device(context);
+}
+
+static int
+mlx5_glue_query_device(struct ibv_context *context,
+ struct ibv_device_attr *device_attr)
+{
+ return ibv_query_device(context, device_attr);
+}
+
+static int
+mlx5_glue_query_device_ex(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr)
+{
+ return ibv_query_device_ex(context, input, attr);
+}
+
+static int
+mlx5_glue_query_port(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr)
+{
+ return ibv_query_port(context, port_num, port_attr);
+}
+
+static struct ibv_comp_channel *
+mlx5_glue_create_comp_channel(struct ibv_context *context)
+{
+ return ibv_create_comp_channel(context);
+}
+
+static int
+mlx5_glue_destroy_comp_channel(struct ibv_comp_channel *channel)
+{
+ return ibv_destroy_comp_channel(channel);
+}
+
+static struct ibv_cq *
+mlx5_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,
+ struct ibv_comp_channel *channel, int comp_vector)
+{
+ return ibv_create_cq(context, cqe, cq_context, channel, comp_vector);
+}
+
+static int
+mlx5_glue_destroy_cq(struct ibv_cq *cq)
+{
+ return ibv_destroy_cq(cq);
+}
+
+static int
+mlx5_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,
+ void **cq_context)
+{
+ return ibv_get_cq_event(channel, cq, cq_context);
+}
+
+static void
+mlx5_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)
+{
+ ibv_ack_cq_events(cq, nevents);
+}
+
+static struct ibv_rwq_ind_table *
+mlx5_glue_create_rwq_ind_table(struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr)
+{
+ return ibv_create_rwq_ind_table(context, init_attr);
+}
+
+static int
+mlx5_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)
+{
+ return ibv_destroy_rwq_ind_table(rwq_ind_table);
+}
+
+static struct ibv_wq *
+mlx5_glue_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr)
+{
+ return ibv_create_wq(context, wq_init_attr);
+}
+
+static int
+mlx5_glue_destroy_wq(struct ibv_wq *wq)
+{
+ return ibv_destroy_wq(wq);
+}
+
+static int
+mlx5_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)
+{
+ return ibv_modify_wq(wq, wq_attr);
+}
+
+static struct ibv_flow *
+mlx5_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)
+{
+ return ibv_create_flow(qp, flow);
+}
+
+static int
+mlx5_glue_destroy_flow(struct ibv_flow *flow_id)
+{
+ return ibv_destroy_flow(flow_id);
+}
+
+static struct ibv_qp *
+mlx5_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)
+{
+ return ibv_create_qp(pd, qp_init_attr);
+}
+
+static struct ibv_qp *
+mlx5_glue_create_qp_ex(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex)
+{
+ return ibv_create_qp_ex(context, qp_init_attr_ex);
+}
+
+static int
+mlx5_glue_destroy_qp(struct ibv_qp *qp)
+{
+ return ibv_destroy_qp(qp);
+}
+
+static int
+mlx5_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
+{
+ return ibv_modify_qp(qp, attr, attr_mask);
+}
+
+static struct ibv_mr *
+mlx5_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
+{
+ return ibv_reg_mr(pd, addr, length, access);
+}
+
+static int
+mlx5_glue_dereg_mr(struct ibv_mr *mr)
+{
+ return ibv_dereg_mr(mr);
+}
+
+static struct ibv_counter_set *
+mlx5_glue_create_counter_set(struct ibv_context *context,
+ struct ibv_counter_set_init_attr *init_attr)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)context;
+ (void)init_attr;
+ return NULL;
+#else
+ return ibv_create_counter_set(context, init_attr);
+#endif
+}
+
+static int
+mlx5_glue_destroy_counter_set(struct ibv_counter_set *cs)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)cs;
+ return ENOTSUP;
+#else
+ return ibv_destroy_counter_set(cs);
+#endif
+}
+
+static int
+mlx5_glue_describe_counter_set(struct ibv_context *context,
+ uint16_t counter_set_id,
+ struct ibv_counter_set_description *cs_desc)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)context;
+ (void)counter_set_id;
+ (void)cs_desc;
+ return ENOTSUP;
+#else
+ return ibv_describe_counter_set(context, counter_set_id, cs_desc);
+#endif
+}
+
+static int
+mlx5_glue_query_counter_set(struct ibv_query_counter_set_attr *query_attr,
+ struct ibv_counter_set_data *cs_data)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)query_attr;
+ (void)cs_data;
+ return ENOTSUP;
+#else
+ return ibv_query_counter_set(query_attr, cs_data);
+#endif
+}
+
+static void
+mlx5_glue_ack_async_event(struct ibv_async_event *event)
+{
+ ibv_ack_async_event(event);
+}
+
+static int
+mlx5_glue_get_async_event(struct ibv_context *context,
+ struct ibv_async_event *event)
+{
+ return ibv_get_async_event(context, event);
+}
+
+static const char *
+mlx5_glue_port_state_str(enum ibv_port_state port_state)
+{
+ return ibv_port_state_str(port_state);
+}
+
+static struct ibv_cq *
+mlx5_glue_cq_ex_to_cq(struct ibv_cq_ex *cq)
+{
+ return ibv_cq_ex_to_cq(cq);
+}
+
+static struct ibv_cq_ex *
+mlx5_glue_dv_create_cq(struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *cq_attr,
+ struct mlx5dv_cq_init_attr *mlx5_cq_attr)
+{
+ return mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr);
+}
+
+static int
+mlx5_glue_dv_query_device(struct ibv_context *ctx,
+ struct mlx5dv_context *attrs_out)
+{
+ return mlx5dv_query_device(ctx, attrs_out);
+}
+
+static int
+mlx5_glue_dv_set_context_attr(struct ibv_context *ibv_ctx,
+ enum mlx5dv_set_ctx_attr_type type, void *attr)
+{
+ return mlx5dv_set_context_attr(ibv_ctx, type, attr);
+}
+
+static int
+mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type)
+{
+ return mlx5dv_init_obj(obj, obj_type);
+}
+
+const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
+ .version = MLX5_GLUE_VERSION,
+ .fork_init = mlx5_glue_fork_init,
+ .alloc_pd = mlx5_glue_alloc_pd,
+ .dealloc_pd = mlx5_glue_dealloc_pd,
+ .get_device_list = mlx5_glue_get_device_list,
+ .free_device_list = mlx5_glue_free_device_list,
+ .open_device = mlx5_glue_open_device,
+ .close_device = mlx5_glue_close_device,
+ .query_device = mlx5_glue_query_device,
+ .query_device_ex = mlx5_glue_query_device_ex,
+ .query_port = mlx5_glue_query_port,
+ .create_comp_channel = mlx5_glue_create_comp_channel,
+ .destroy_comp_channel = mlx5_glue_destroy_comp_channel,
+ .create_cq = mlx5_glue_create_cq,
+ .destroy_cq = mlx5_glue_destroy_cq,
+ .get_cq_event = mlx5_glue_get_cq_event,
+ .ack_cq_events = mlx5_glue_ack_cq_events,
+ .create_rwq_ind_table = mlx5_glue_create_rwq_ind_table,
+ .destroy_rwq_ind_table = mlx5_glue_destroy_rwq_ind_table,
+ .create_wq = mlx5_glue_create_wq,
+ .destroy_wq = mlx5_glue_destroy_wq,
+ .modify_wq = mlx5_glue_modify_wq,
+ .create_flow = mlx5_glue_create_flow,
+ .destroy_flow = mlx5_glue_destroy_flow,
+ .create_qp = mlx5_glue_create_qp,
+ .create_qp_ex = mlx5_glue_create_qp_ex,
+ .destroy_qp = mlx5_glue_destroy_qp,
+ .modify_qp = mlx5_glue_modify_qp,
+ .reg_mr = mlx5_glue_reg_mr,
+ .dereg_mr = mlx5_glue_dereg_mr,
+ .create_counter_set = mlx5_glue_create_counter_set,
+ .destroy_counter_set = mlx5_glue_destroy_counter_set,
+ .describe_counter_set = mlx5_glue_describe_counter_set,
+ .query_counter_set = mlx5_glue_query_counter_set,
+ .ack_async_event = mlx5_glue_ack_async_event,
+ .get_async_event = mlx5_glue_get_async_event,
+ .port_state_str = mlx5_glue_port_state_str,
+ .cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,
+ .dv_create_cq = mlx5_glue_dv_create_cq,
+ .dv_query_device = mlx5_glue_dv_query_device,
+ .dv_set_context_attr = mlx5_glue_dv_set_context_attr,
+ .dv_init_obj = mlx5_glue_dv_init_obj,
+};
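Every entry above is a one-line forwarder, so the PMD proper never references libibverbs or libmlx5dv symbols directly. That indirection is what lets the glue be built as a standalone shared object and loaded at run time (via the dlopen build option added in this release, assuming the CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS naming), keeping a hard rdma-core dependency out of the main binary. Call sites simply go through the table:

/* PMD-side call through the glue table (sketch; ctx is an open
 * ibv_context obtained via mlx5_glue->open_device()). */
static struct ibv_pd *
example_alloc_pd(struct ibv_context *ctx)
{
	return mlx5_glue->alloc_pd(ctx);
}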
diff --git a/drivers/net/mlx5/mlx5_glue.h b/drivers/net/mlx5/mlx5_glue.h
new file mode 100644
index 00000000..b5efee3b
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_glue.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd.
+ */
+
+#ifndef MLX5_GLUE_H_
+#define MLX5_GLUE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#ifndef MLX5_GLUE_VERSION
+#define MLX5_GLUE_VERSION ""
+#endif
+
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+struct ibv_counter_set;
+struct ibv_counter_set_data;
+struct ibv_counter_set_description;
+struct ibv_counter_set_init_attr;
+struct ibv_query_counter_set_attr;
+#endif
+
+/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
+struct mlx5_glue {
+ const char *version;
+ int (*fork_init)(void);
+ struct ibv_pd *(*alloc_pd)(struct ibv_context *context);
+ int (*dealloc_pd)(struct ibv_pd *pd);
+ struct ibv_device **(*get_device_list)(int *num_devices);
+ void (*free_device_list)(struct ibv_device **list);
+ struct ibv_context *(*open_device)(struct ibv_device *device);
+ int (*close_device)(struct ibv_context *context);
+ int (*query_device)(struct ibv_context *context,
+ struct ibv_device_attr *device_attr);
+ int (*query_device_ex)(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr);
+ int (*query_port)(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr);
+ struct ibv_comp_channel *(*create_comp_channel)
+ (struct ibv_context *context);
+ int (*destroy_comp_channel)(struct ibv_comp_channel *channel);
+ struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
+ void *cq_context,
+ struct ibv_comp_channel *channel,
+ int comp_vector);
+ int (*destroy_cq)(struct ibv_cq *cq);
+ int (*get_cq_event)(struct ibv_comp_channel *channel,
+ struct ibv_cq **cq, void **cq_context);
+ void (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);
+ struct ibv_rwq_ind_table *(*create_rwq_ind_table)
+ (struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr);
+ int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
+ struct ibv_wq *(*create_wq)(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr);
+ int (*destroy_wq)(struct ibv_wq *wq);
+ int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
+ struct ibv_flow *(*create_flow)(struct ibv_qp *qp,
+ struct ibv_flow_attr *flow);
+ int (*destroy_flow)(struct ibv_flow *flow_id);
+ struct ibv_qp *(*create_qp)(struct ibv_pd *pd,
+ struct ibv_qp_init_attr *qp_init_attr);
+ struct ibv_qp *(*create_qp_ex)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex);
+ int (*destroy_qp)(struct ibv_qp *qp);
+ int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask);
+ struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,
+ size_t length, int access);
+ int (*dereg_mr)(struct ibv_mr *mr);
+ struct ibv_counter_set *(*create_counter_set)
+ (struct ibv_context *context,
+ struct ibv_counter_set_init_attr *init_attr);
+ int (*destroy_counter_set)(struct ibv_counter_set *cs);
+ int (*describe_counter_set)
+ (struct ibv_context *context,
+ uint16_t counter_set_id,
+ struct ibv_counter_set_description *cs_desc);
+ int (*query_counter_set)(struct ibv_query_counter_set_attr *query_attr,
+ struct ibv_counter_set_data *cs_data);
+ void (*ack_async_event)(struct ibv_async_event *event);
+ int (*get_async_event)(struct ibv_context *context,
+ struct ibv_async_event *event);
+ const char *(*port_state_str)(enum ibv_port_state port_state);
+ struct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);
+ struct ibv_cq_ex *(*dv_create_cq)
+ (struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *cq_attr,
+ struct mlx5dv_cq_init_attr *mlx5_cq_attr);
+ int (*dv_query_device)(struct ibv_context *ctx_in,
+ struct mlx5dv_context *attrs_out);
+ int (*dv_set_context_attr)(struct ibv_context *ibv_ctx,
+ enum mlx5dv_set_ctx_attr_type type,
+ void *attr);
+ int (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type);
+};
+
+const struct mlx5_glue *mlx5_glue;
+
+#endif /* MLX5_GLUE_H_ */
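Since the table may come from a separately built plugin, a consumer is expected to compare the embedded version string against MLX5_GLUE_VERSION before using any entry; the PMD performs an equivalent check at initialization (an assumption consistent with the comment above struct mlx5_glue). Sketch:

#include <string.h>

/* Reject a glue object built from a different tree. */
static int
glue_version_ok(const struct mlx5_glue *glue)
{
	return strcmp(glue->version, MLX5_GLUE_VERSION) == 0;
}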
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index d17b991e..e8a8d459 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#include <stddef.h>
@@ -52,7 +24,7 @@
#endif
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include "mlx5.h"
@@ -93,11 +65,9 @@ priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
void
mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
- if (mlx5_is_secondary())
- return;
assert(index < MLX5_MAX_MAC_ADDRESSES);
memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr));
- if (!dev->data->promiscuous && !dev->data->all_multicast)
+ if (!dev->data->promiscuous)
mlx5_traffic_restart(dev);
}
@@ -124,8 +94,6 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
int ret = 0;
(void)vmdq;
- if (mlx5_is_secondary())
- return 0;
assert(index < MLX5_MAX_MAC_ADDRESSES);
/* First, make sure this address isn't already configured. */
for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) {
@@ -138,7 +106,7 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
return EADDRINUSE;
}
dev->data->mac_addrs[index] = *mac;
- if (!dev->data->promiscuous && !dev->data->all_multicast)
+ if (!dev->data->promiscuous)
mlx5_traffic_restart(dev);
return ret;
}
@@ -154,8 +122,6 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
void
mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
- if (mlx5_is_secondary())
- return;
DEBUG("%p: setting primary MAC address", (void *)dev);
mlx5_mac_addr_add(dev, mac_addr, 0, 0);
}
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 6b29eed5..857dfcd8 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -1,38 +1,8 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
*/
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
@@ -46,6 +16,7 @@
#include "mlx5.h"
#include "mlx5_rxtx.h"
+#include "mlx5_glue.h"
struct mlx5_check_mempool_data {
int ret;
@@ -141,8 +112,15 @@ priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
DEBUG("%p: discovered new memory pool \"%s\" (%p)",
(void *)txq_ctrl, mp->name, (void *)mp);
mr = priv_mr_get(priv, mp);
- if (mr == NULL)
+ if (mr == NULL) {
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DEBUG("Using unregistered mempool 0x%p(%s) in secondary process,"
+ " please create mempool before rte_eth_dev_start()",
+ (void *)mp, mp->name);
+ return NULL;
+ }
mr = priv_mr_new(priv, mp);
+ }
if (unlikely(mr == NULL)) {
DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
(void *)txq_ctrl);
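The new branch turns an implicit rule into an explicit refusal: a secondary process cannot create memory regions, so any mempool it transmits from must already be registered, which happens for pools that exist when the primary starts the port. In application terms (sketch; pool sizes are placeholders):

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

/* Primary process: create Tx pools before starting the port so their
 * MRs exist by the time a secondary process transmits. */
static struct rte_mempool *
tx_pool_before_start(uint16_t port_id)
{
	struct rte_mempool *mp = rte_pktmbuf_pool_create("tx_pool", 8192,
			256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

	if (mp == NULL || rte_eth_dev_start(port_id) != 0)
		return NULL;
	return mp;
}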
@@ -291,6 +269,9 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
DEBUG("mempool %p area start=%p end=%p size=%zu",
(void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
+ /* Save original addresses for exact MR lookup. */
+ mr->start = start;
+ mr->end = end;
/* Round start and end to page boundary if found in memory segments. */
for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
uintptr_t addr = (uintptr_t)ms[i].addr;
@@ -305,12 +286,10 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
(void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
- mr->mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
- IBV_ACCESS_LOCAL_WRITE);
+ mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start,
+ IBV_ACCESS_LOCAL_WRITE);
mr->mp = mp;
mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
- mr->start = start;
- mr->end = (uintptr_t)mr->mr->addr + mr->mr->length;
rte_atomic32_inc(&mr->refcnt);
DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv,
(void *)mr, rte_atomic32_read(&mr->refcnt));
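Keeping the pre-rounding addresses lets a lookup match a mempool by its exact virtual range even though the MR itself is registered on page-aligned boundaries. The search then reduces to an interval test (sketch using only the fields touched above):

#include <stdint.h>

struct mr_entry {
	uintptr_t start; /* original mempool start */
	uintptr_t end;   /* original mempool end */
	uint32_t lkey;   /* big-endian lkey cached for the data path */
};

/* Return the lkey covering addr, or UINT32_MAX when none does. */
static uint32_t
mr_lookup(const struct mr_entry *tab, unsigned int n, uintptr_t addr)
{
	unsigned int i;

	for (i = 0; i != n; ++i)
		if (addr >= tab[i].start && addr < tab[i].end)
			return tab[i].lkey;
	return UINT32_MAX;
}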
@@ -364,7 +343,7 @@ priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
DEBUG("Memory Region %p refcnt: %d",
(void *)mr, rte_atomic32_read(&mr->refcnt));
if (rte_atomic32_dec_and_test(&mr->refcnt)) {
- claim_zero(ibv_dereg_mr(mr->mr));
+ claim_zero(mlx5_glue->dereg_mr(mr->mr));
LIST_REMOVE(mr, next);
rte_free(mr);
return 0;
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 2de310bc..9eb9c15e 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
*/
#ifndef RTE_PMD_MLX5_PRM_H_
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index f3de46de..d06b0bee 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#include <stddef.h>
@@ -48,9 +20,10 @@
#endif
#include <rte_malloc.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "mlx5.h"
+#include "mlx5_defs.h"
#include "mlx5_rxtx.h"
/**
@@ -72,6 +45,10 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
int ret = 0;
priv_lock(priv);
+ if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
+ ret = -EINVAL;
+ goto out;
+ }
if (rss_conf->rss_key && rss_conf->rss_key_len) {
priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key,
rss_conf->rss_key_len, 0);
@@ -274,7 +251,6 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
int ret;
struct priv *priv = dev->data->dev_private;
- assert(!mlx5_is_secondary());
priv_lock(priv);
ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size);
priv_unlock(priv);
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 0ef2cdf0..4ffc869a 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#include <stddef.h>
@@ -45,7 +17,7 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "mlx5.h"
#include "mlx5_rxtx.h"
@@ -60,8 +32,6 @@
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
- if (mlx5_is_secondary())
- return;
dev->data->promiscuous = 1;
mlx5_traffic_restart(dev);
}
@@ -75,8 +45,6 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
- if (mlx5_is_secondary())
- return;
dev->data->promiscuous = 0;
mlx5_traffic_restart(dev);
}
@@ -90,8 +58,6 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
- if (mlx5_is_secondary())
- return;
dev->data->all_multicast = 1;
mlx5_traffic_restart(dev);
}
@@ -105,8 +71,6 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
- if (mlx5_is_secondary())
- return;
dev->data->all_multicast = 0;
mlx5_traffic_restart(dev);
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 85399eff..ff58c492 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#include <stddef.h>
@@ -52,7 +24,7 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
@@ -63,6 +35,7 @@
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
+#include "mlx5_glue.h"
/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
@@ -213,6 +186,78 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
}
/**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx5_priv_get_rx_queue_offloads(struct priv *priv)
+{
+ struct mlx5_dev_config *config = &priv->config;
+ uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_TIMESTAMP |
+ DEV_RX_OFFLOAD_JUMBO_FRAME);
+
+ if (config->hw_fcs_strip)
+ offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ if (config->hw_csum)
+ offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM);
+ if (config->hw_vlan_strip)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ return offloads;
+}
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
+ uint64_t queue_supp_offloads =
+ mlx5_priv_get_rx_queue_offloads(priv);
+ uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv);
+
+ if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
+ offloads)
+ return 0;
+ if (((port_offloads ^ offloads) & port_supp_offloads))
+ return 0;
+ return 1;
+}
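
For illustration, a minimal stand-alone version of the same subset and consistency checks done by priv_is_rx_queue_offloads_allowed() above, using hypothetical offload bits rather than the real DEV_RX_OFFLOAD_* values (not part of the patch):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical offload bits. */
	const uint64_t queue_supp = 0x0f; /* per-queue capable bits */
	const uint64_t port_supp = 0x30;  /* per-port capable bits */
	const uint64_t port_conf = 0x10;  /* bits configured on the port */
	const uint64_t requested = 0x13;  /* bits asked for at queue setup */
	int ok = 1;

	/* Every requested bit must be supported at some level. */
	if ((requested & (queue_supp | port_supp)) != requested)
		ok = 0;
	/* Port-level bits must agree with the port configuration. */
	if ((port_conf ^ requested) & port_supp)
		ok = 0;
	printf("offloads 0x%" PRIx64 " %s\n", requested,
	       ok ? "accepted" : "rejected");
	return 0;
}
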
+
+/**
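+ * DPDK callback to configure a Rx queue.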
*
* @param dev
* Pointer to Ethernet device structure.
@@ -241,9 +286,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
int ret = 0;
- (void)conf;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
priv_lock(priv);
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
@@ -259,6 +301,16 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
priv_unlock(priv);
return -EOVERFLOW;
}
+ if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
+ ret = ENOTSUP;
+ ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ (mlx5_priv_get_rx_port_offloads(priv) |
+ mlx5_priv_get_rx_queue_offloads(priv)));
+ goto out;
+ }
if (!mlx5_priv_rxq_releasable(priv, idx)) {
ret = EBUSY;
ERROR("%p: unable to release queue index %u",
@@ -266,7 +318,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
goto out;
}
mlx5_priv_rxq_release(priv, idx);
- rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, mp);
+ rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp);
if (!rxq_ctrl) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
@@ -294,9 +346,6 @@ mlx5_rx_queue_release(void *dpdk_rxq)
struct mlx5_rxq_ctrl *rxq_ctrl;
struct priv *priv;
- if (mlx5_is_secondary())
- return;
-
if (rxq == NULL)
return;
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
@@ -327,7 +376,6 @@ priv_rx_intr_vec_enable(struct priv *priv)
unsigned int count = 0;
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
- assert(!mlx5_is_secondary());
if (!priv->dev->data->dev_conf.intr_conf.rxq)
return 0;
priv_rx_intr_vec_disable(priv);
@@ -442,7 +490,6 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
doorbell = (uint64_t)doorbell_hi << 32;
doorbell |= rxq->cqn;
rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
- rte_wmb();
rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
}
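
As a side note on the arming sequence above: the 64-bit doorbell is simply the 32-bit high word (composed just above this hunk, its exact layout is device-specific) stacked over the CQ number. A detached sketch of that packing, not part of the patch:

#include <stdint.h>

/* Compose the 64-bit CQ doorbell as in mlx5_arm_cq() above; the
 * doorbell_hi contents are an assumption of this sketch. */
static uint64_t cq_doorbell(uint32_t doorbell_hi, uint32_t cqn)
{
	uint64_t db = (uint64_t)doorbell_hi << 32;

	db |= cqn; /* low 32 bits carry the CQ number */
	return db;
}

int main(void)
{
	return cq_doorbell(0x1234u, 7u) != ((0x1234ull << 32) | 7u);
}
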
@@ -460,7 +507,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
int ret = 0;
@@ -504,7 +551,7 @@ exit:
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_rxq_ibv *rxq_ibv = NULL;
@@ -526,13 +573,13 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
ret = EINVAL;
goto exit;
}
- ret = ibv_get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
+ ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
if (ret || ev_cq != rxq_ibv->cq) {
ret = EINVAL;
goto exit;
}
rxq_data->cq_arm_sn++;
- ibv_ack_cq_events(rxq_ibv->cq, 1);
+ mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
exit:
if (rxq_ibv)
mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
@@ -576,9 +623,12 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
unsigned int i;
int ret = 0;
struct mlx5dv_obj obj;
+ struct mlx5_dev_config *config = &priv->config;
assert(rxq_data);
assert(!rxq_ctrl->ibv);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
+ priv->verbs_alloc_ctx.obj = rxq_ctrl;
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
rxq_ctrl->socket);
if (!tmpl) {
@@ -597,7 +647,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
}
}
if (rxq_ctrl->irq) {
- tmpl->channel = ibv_create_comp_channel(priv->ctx);
+ tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
if (!tmpl->channel) {
ERROR("%p: Comp Channel creation failure",
(void *)rxq_ctrl);
@@ -612,7 +662,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
.comp_mask = 0,
};
- if (priv->cqe_comp && !rxq_data->hw_timestamp) {
+ if (config->cqe_comp && !rxq_data->hw_timestamp) {
attr.cq.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
@@ -622,11 +672,12 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
*/
if (rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
- } else if (priv->cqe_comp && rxq_data->hw_timestamp) {
+ } else if (config->cqe_comp && rxq_data->hw_timestamp) {
DEBUG("Rx CQE compression is disabled for HW timestamp");
}
- tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
- &attr.cq.mlx5));
+ tmpl->cq = mlx5_glue->cq_ex_to_cq
+ (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
+ &attr.cq.mlx5));
if (tmpl->cq == NULL) {
ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
goto error;
@@ -657,12 +708,12 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
- if (priv->hw_padding) {
+ if (config->hw_padding) {
attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
#endif
- tmpl->wq = ibv_create_wq(priv->ctx, &attr.wq);
+ tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
if (tmpl->wq == NULL) {
ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
goto error;
@@ -686,7 +737,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
.attr_mask = IBV_WQ_ATTR_STATE,
.wq_state = IBV_WQS_RDY,
};
- ret = ibv_modify_wq(tmpl->wq, &mod);
+ ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
if (ret) {
ERROR("%p: WQ state to IBV_WQS_RDY failed",
(void *)rxq_ctrl);
@@ -696,7 +747,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
obj.cq.out = &cq_info;
obj.rwq.in = tmpl->wq;
obj.rwq.out = &rwq;
- ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
if (ret != 0)
goto error;
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
@@ -742,16 +793,18 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
(void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
error:
if (tmpl->wq)
- claim_zero(ibv_destroy_wq(tmpl->wq));
+ claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
if (tmpl->cq)
- claim_zero(ibv_destroy_cq(tmpl->cq));
+ claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
if (tmpl->channel)
- claim_zero(ibv_destroy_comp_channel(tmpl->channel));
+ claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
if (tmpl->mr)
priv_mr_release(priv, tmpl->mr);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return NULL;
}
@@ -814,10 +867,11 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
(void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
rxq_free_elts(rxq_ibv->rxq_ctrl);
- claim_zero(ibv_destroy_wq(rxq_ibv->wq));
- claim_zero(ibv_destroy_cq(rxq_ibv->cq));
+ claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
+ claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
if (rxq_ibv->channel)
- claim_zero(ibv_destroy_comp_channel(rxq_ibv->channel));
+ claim_zero(mlx5_glue->destroy_comp_channel
+ (rxq_ibv->channel));
LIST_REMOVE(rxq_ibv, next);
rte_free(rxq_ibv);
return 0;
@@ -880,13 +934,19 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
*/
struct mlx5_rxq_ctrl*
mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket, struct rte_mempool *mp)
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
{
struct rte_eth_dev *dev = priv->dev;
struct mlx5_rxq_ctrl *tmpl;
- const uint16_t desc_n =
- desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+ struct mlx5_dev_config *config = &priv->config;
+ /*
+ * Always allocate extra slots, even if vector Rx
+ * eventually ends up unused.
+ */
+ const uint16_t desc_n =
+ desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
tmpl = rte_calloc_socket("RXQ", 1,
sizeof(*tmpl) +
@@ -902,7 +962,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
- } else if (dev->data->dev_conf.rxmode.enable_scatter) {
+ } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -944,20 +1004,16 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
goto error;
}
/* Toggle RX checksum offload if hardware supports it. */
- if (priv->hw_csum)
- tmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- if (priv->hw_csum_l2tun)
- tmpl->rxq.csum_l2tun =
- !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- tmpl->rxq.hw_timestamp =
- !!dev->data->dev_conf.rxmode.hw_timestamp;
+ tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
+ priv->config.hw_csum_l2tun);
+ tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = (priv->hw_vlan_strip &&
- !!dev->data->dev_conf.rxmode.hw_vlan_strip);
+ tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
- if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
tmpl->rxq.crc_present = 0;
- } else if (priv->hw_fcs_strip) {
+ } else if (config->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
} else {
WARN("%p: CRC stripping has been disabled but will still"
@@ -1121,7 +1177,7 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
struct mlx5_ind_table_ibv *ind_tbl;
const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
- log2above(priv->ind_table_max_size);
+ log2above(priv->config.ind_table_max_size);
struct ibv_wq *wq[1 << wq_n];
unsigned int i;
unsigned int j;
@@ -1143,13 +1199,13 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
/* Finalise indirection table. */
for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
wq[i] = wq[j];
- ind_tbl->ind_table = ibv_create_rwq_ind_table(
- priv->ctx,
- &(struct ibv_rwq_ind_table_init_attr){
+ ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = wq_n,
.ind_tbl = wq,
.comp_mask = 0,
- });
+ });
if (!ind_tbl->ind_table)
goto error;
rte_atomic32_inc(&ind_tbl->refcnt);
@@ -1221,7 +1277,8 @@ mlx5_priv_ind_table_ibv_release(struct priv *priv,
DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
- claim_zero(ibv_destroy_rwq_ind_table(ind_tbl->ind_table));
+ claim_zero(mlx5_glue->destroy_rwq_ind_table
+ (ind_tbl->ind_table));
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
@@ -1288,9 +1345,9 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
if (!ind_tbl)
return NULL;
- qp = ibv_create_qp_ex(
- priv->ctx,
- &(struct ibv_qp_init_attr_ex){
+ qp = mlx5_glue->create_qp_ex
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
IBV_QP_INIT_ATTR_PD |
@@ -1304,7 +1361,7 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
},
.rwq_ind_tbl = ind_tbl->ind_table,
.pd = priv->pd,
- });
+ });
if (!qp)
goto error;
hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
@@ -1323,7 +1380,7 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
error:
mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
if (qp)
- claim_zero(ibv_destroy_qp(qp));
+ claim_zero(mlx5_glue->destroy_qp(qp));
return NULL;
}
@@ -1391,7 +1448,7 @@ mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
- claim_zero(ibv_destroy_qp(hrxq->qp));
+ claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 2d30c507..dc4ead93 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#include <assert.h>
@@ -344,15 +316,11 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
unsigned int j = 0;
unsigned int k = 0;
uint16_t max_elts;
- unsigned int max_inline = txq->max_inline;
- const unsigned int inline_en = !!max_inline && txq->inline_en;
uint16_t max_wqe;
unsigned int comp;
- volatile struct mlx5_wqe_v *wqe = NULL;
volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
unsigned int segs_n = 0;
- struct rte_mbuf *buf = NULL;
- uint8_t *raw;
+ const unsigned int max_inline = txq->max_inline;
if (unlikely(!pkts_n))
return 0;
@@ -361,20 +329,24 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
do {
+ struct rte_mbuf *buf = NULL;
+ uint8_t *raw;
+ volatile struct mlx5_wqe_v *wqe = NULL;
volatile rte_v128u32_t *dseg = NULL;
uint32_t length;
unsigned int ds = 0;
unsigned int sg = 0; /* counter of additional segs attached. */
uintptr_t addr;
- uint64_t naddr;
uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
uint16_t tso_header_sz = 0;
uint16_t ehdr;
- uint8_t cs_flags = 0;
+ uint8_t cs_flags;
uint64_t tso = 0;
uint16_t tso_segsz = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
@@ -392,7 +364,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (max_elts < segs_n)
break;
max_elts -= segs_n;
- --segs_n;
+ sg = --segs_n;
if (unlikely(--max_wqe == 0))
break;
wqe = (volatile struct mlx5_wqe_v *)
@@ -417,23 +389,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (pkts_n - i > 1)
rte_prefetch0(
rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
- /* Should we enable HW CKSUM offload */
- if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- const uint64_t is_tunneled = buf->ol_flags &
- (PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN);
-
- if (is_tunneled && txq->tunnel_en) {
- cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
- MLX5_ETH_WQE_L4_INNER_CSUM;
- if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
- cs_flags |= MLX5_ETH_WQE_L3_CSUM;
- } else {
- cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
- }
- }
+ cs_flags = txq_ol_cksum_to_cs(txq, buf);
raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
/* Replace the Ethernet type by the VLAN if necessary. */
if (buf->ol_flags & PKT_TX_VLAN_PKT) {
@@ -459,82 +415,72 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
addr += pkt_inline_sz;
}
raw += MLX5_WQE_DWORD_SIZE;
- if (txq->tso_en) {
- tso = buf->ol_flags & PKT_TX_TCP_SEG;
- if (tso) {
- uintptr_t end = (uintptr_t)
- (((uintptr_t)txq->wqes) +
- (1 << txq->wqe_n) *
- MLX5_WQE_SIZE);
- unsigned int copy_b;
- uint8_t vlan_sz = (buf->ol_flags &
- PKT_TX_VLAN_PKT) ? 4 : 0;
- const uint64_t is_tunneled =
- buf->ol_flags &
- (PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN);
-
- tso_header_sz = buf->l2_len + vlan_sz +
- buf->l3_len + buf->l4_len;
- tso_segsz = buf->tso_segsz;
- if (unlikely(tso_segsz == 0)) {
- txq->stats.oerrors++;
- break;
- }
- if (is_tunneled && txq->tunnel_en) {
- tso_header_sz += buf->outer_l2_len +
- buf->outer_l3_len;
- cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
- } else {
- cs_flags |= MLX5_ETH_WQE_L4_CSUM;
- }
- if (unlikely(tso_header_sz >
- MLX5_MAX_TSO_HEADER)) {
- txq->stats.oerrors++;
+ tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
+ if (tso) {
+ uintptr_t end =
+ (uintptr_t)(((uintptr_t)txq->wqes) +
+ (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+ unsigned int copy_b;
+ uint8_t vlan_sz =
+ (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
+ const uint64_t is_tunneled =
+ buf->ol_flags & (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
+ tso_header_sz = buf->l2_len + vlan_sz +
+ buf->l3_len + buf->l4_len;
+ tso_segsz = buf->tso_segsz;
+ if (unlikely(tso_segsz == 0)) {
+ txq->stats.oerrors++;
+ break;
+ }
+ if (is_tunneled && txq->tunnel_en) {
+ tso_header_sz += buf->outer_l2_len +
+ buf->outer_l3_len;
+ cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ } else {
+ cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ }
+ if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) {
+ txq->stats.oerrors++;
+ break;
+ }
+ copy_b = tso_header_sz - pkt_inline_sz;
+ /* First seg must contain all headers. */
+ assert(copy_b <= length);
+ if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
+ uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+
+ if (unlikely(max_wqe < n))
break;
- }
- copy_b = tso_header_sz - pkt_inline_sz;
- /* First seg must contain all headers. */
- assert(copy_b <= length);
- if (copy_b &&
- ((end - (uintptr_t)raw) > copy_b)) {
- uint16_t n = (MLX5_WQE_DS(copy_b) -
- 1 + 3) / 4;
-
- if (unlikely(max_wqe < n))
- break;
- max_wqe -= n;
- rte_memcpy((void *)raw,
- (void *)addr, copy_b);
- addr += copy_b;
- length -= copy_b;
- /* Include padding for TSO header. */
- copy_b = MLX5_WQE_DS(copy_b) *
- MLX5_WQE_DWORD_SIZE;
- pkt_inline_sz += copy_b;
- raw += copy_b;
- } else {
- /* NOP WQE. */
- wqe->ctrl = (rte_v128u32_t){
- rte_cpu_to_be_32(
- txq->wqe_ci << 8),
- rte_cpu_to_be_32(
- txq->qp_num_8s | 1),
- 0,
- 0,
- };
- ds = 1;
+ max_wqe -= n;
+ rte_memcpy((void *)raw, (void *)addr, copy_b);
+ addr += copy_b;
+ length -= copy_b;
+ /* Include padding for TSO header. */
+ copy_b = MLX5_WQE_DS(copy_b) *
+ MLX5_WQE_DWORD_SIZE;
+ pkt_inline_sz += copy_b;
+ raw += copy_b;
+ } else {
+ /* NOP WQE. */
+ wqe->ctrl = (rte_v128u32_t){
+ rte_cpu_to_be_32(txq->wqe_ci << 8),
+ rte_cpu_to_be_32(txq->qp_num_8s | 1),
+ 0,
+ 0,
+ };
+ ds = 1;
#ifdef MLX5_PMD_SOFT_COUNTERS
- total_length = 0;
+ total_length = 0;
#endif
- k++;
- goto next_wqe;
- }
+ k++;
+ goto next_wqe;
}
}
/* Inline if enough room. */
- if (inline_en || tso) {
- uint32_t inl;
+ if (max_inline || tso) {
+ uint32_t inl = 0;
uintptr_t end = (uintptr_t)
(((uintptr_t)txq->wqes) +
(1 << txq->wqe_n) * MLX5_WQE_SIZE);
@@ -542,12 +488,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
RTE_CACHE_LINE_SIZE -
(pkt_inline_sz - 2) -
!!tso * sizeof(inl);
- uintptr_t addr_end = (addr + inline_room) &
- ~(RTE_CACHE_LINE_SIZE - 1);
- unsigned int copy_b = (addr_end > addr) ?
- RTE_MIN((addr_end - addr), length) :
- 0;
-
+ uintptr_t addr_end;
+ unsigned int copy_b;
+
+pkt_inline:
+ addr_end = RTE_ALIGN_FLOOR(addr + inline_room,
+ RTE_CACHE_LINE_SIZE);
+ copy_b = (addr_end > addr) ?
+ RTE_MIN((addr_end - addr), length) : 0;
if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
/*
* One Dseg remains in the current WQE. To
@@ -559,7 +507,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (unlikely(max_wqe < n))
break;
max_wqe -= n;
- if (tso) {
+ if (tso && !inl) {
inl = rte_cpu_to_be_32(copy_b |
MLX5_INLINE_SEG);
rte_memcpy((void *)raw,
@@ -594,11 +542,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
} else if (!segs_n) {
goto next_pkt;
} else {
- /* dseg will be advance as part of next_seg */
- dseg = (volatile rte_v128u32_t *)
- ((uintptr_t)wqe +
- ((ds - 1) * MLX5_WQE_DWORD_SIZE));
- goto next_seg;
+ raw += copy_b;
+ inline_room -= copy_b;
+ --segs_n;
+ buf = buf->next;
+ assert(buf);
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ length = DATA_LEN(buf);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ total_length += length;
+#endif
+ (*txq->elts)[++elts_head & elts_m] = buf;
+ goto pkt_inline;
}
} else {
/*
@@ -610,12 +565,12 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
ds = 3;
use_dseg:
/* Add the remaining packet as a simple ds. */
- naddr = rte_cpu_to_be_64(addr);
+ addr = rte_cpu_to_be_64(addr);
*dseg = (rte_v128u32_t){
rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
- naddr,
- naddr >> 32,
+ addr,
+ addr >> 32,
};
++ds;
if (!segs_n)
@@ -649,20 +604,16 @@ next_seg:
total_length += length;
#endif
/* Store segment information. */
- naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
+ addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
*dseg = (rte_v128u32_t){
rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
- naddr,
- naddr >> 32,
+ addr,
+ addr >> 32,
};
(*txq->elts)[++elts_head & elts_m] = buf;
- ++sg;
- /* Advance counter only if all segs are successfully posted. */
- if (sg < segs_n)
+ if (--segs_n)
goto next_seg;
- else
- j += sg;
next_pkt:
if (ds > MLX5_DSEG_MAX) {
txq->stats.oerrors++;
@@ -671,6 +622,7 @@ next_pkt:
++elts_head;
++pkts;
++i;
+ j += sg;
/* Initialize known and common part of the WQE structure. */
if (tso) {
wqe->ctrl = (rte_v128u32_t){
@@ -722,6 +674,9 @@ next_wqe:
/* Save elts_head in unused "immediate" field of WQE. */
last_wqe->ctrl3 = txq->elts_head;
txq->elts_comp = 0;
+#ifndef NDEBUG
+ ++txq->cq_pi;
+#endif
} else {
txq->elts_comp = comp;
}
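
The cq_pi accounting added above pairs with the "A CQE slot must always be available" assertions: with free-running unsigned indices, pi - ci is the in-flight count even across 16-bit wrap-around. A small self-checking illustration of that arithmetic (not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Free slots in a ring of 2^n_log2 entries with free-running indices;
 * unsigned subtraction makes wrap-around harmless. */
static uint16_t ring_free(unsigned int n_log2, uint16_t pi, uint16_t ci)
{
	return (uint16_t)((1u << n_log2) - (uint16_t)(pi - ci));
}

int main(void)
{
	/* Indices have wrapped: pi=2, ci=65534, 4 entries in flight. */
	assert(ring_free(6, 2, 65534) == 60);
	return 0;
}
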
@@ -840,6 +795,8 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
@@ -847,7 +804,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
struct rte_mbuf *buf = *(pkts++);
uint32_t length;
unsigned int segs_n = buf->nb_segs;
- uint32_t cs_flags = 0;
+ uint32_t cs_flags;
/*
* Make sure there is enough room to store this packet and
@@ -863,10 +820,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
max_elts -= segs_n;
--pkts_n;
- /* Should we enable HW CKSUM offload */
- if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
- cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ cs_flags = txq_ol_cksum_to_cs(txq, buf);
/* Retrieve packet information. */
length = PKT_LEN(buf);
assert(length);
@@ -936,6 +890,9 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
+#ifndef NDEBUG
+ ++txq->cq_pi;
+#endif
} else {
txq->elts_comp = comp;
}
@@ -1067,12 +1024,14 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
do {
struct rte_mbuf *buf = *(pkts++);
uintptr_t addr;
uint32_t length;
unsigned int segs_n = buf->nb_segs;
- uint32_t cs_flags = 0;
+ uint8_t cs_flags;
/*
* Make sure there is enough room to store this packet and
@@ -1093,10 +1052,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
* iteration.
*/
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
- /* Should we enable HW CKSUM offload */
- if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
- cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ cs_flags = txq_ol_cksum_to_cs(txq, buf);
/* Retrieve packet information. */
length = PKT_LEN(buf);
/* Start new session if packet differs. */
@@ -1231,6 +1187,9 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
+#ifndef NDEBUG
+ ++txq->cq_pi;
+#endif
} else {
txq->elts_comp = comp;
}
@@ -1317,10 +1276,10 @@ mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
}
/**
- * DPDK callback for TX with Enhanced MPW support.
+ * TX with Enhanced MPW support.
*
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
+ * @param txq
+ * Pointer to TX queue structure.
* @param[in] pkts
* Packets to transmit.
* @param pkts_n
@@ -1329,10 +1288,10 @@ mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
* @return
* Number of packets successfully transmitted (<= pkts_n).
*/
-uint16_t
-mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+static inline uint16_t
+txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
{
- struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -1361,29 +1320,17 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
do {
struct rte_mbuf *buf = *(pkts++);
uintptr_t addr;
- uint64_t naddr;
unsigned int n;
unsigned int do_inline = 0; /* Whether inline is possible. */
uint32_t length;
- unsigned int segs_n = buf->nb_segs;
- uint32_t cs_flags = 0;
+ uint8_t cs_flags;
- /*
- * Make sure there is enough room to store this packet and
- * that one ring entry remains unused.
- */
- assert(segs_n);
- if (max_elts - j < segs_n)
- break;
- /* Do not bother with large packets MPW cannot handle. */
- if (segs_n > MLX5_MPW_DSEG_MAX) {
- txq->stats.oerrors++;
+ /* Multi-segment packets are handled in the slow path outside. */
+ assert(NB_SEGS(buf) == 1);
+ /* Make sure there is enough room to store this packet. */
+ if (max_elts - j == 0)
break;
- }
- /* Should we enable HW CKSUM offload. */
- if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
- cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ cs_flags = txq_ol_cksum_to_cs(txq, buf);
/* Retrieve packet information. */
length = PKT_LEN(buf);
/* Start new session if:
@@ -1391,50 +1338,35 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* - no space left even for a dseg
* - next packet can be inlined with a new WQE
* - cs_flag differs
- * It can't be MLX5_MPW_STATE_OPENED as always have a single
- * segmented packet.
*/
if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
- if ((segs_n != 1) ||
- (inl_pad + sizeof(struct mlx5_wqe_data_seg) >
- mpw_room) ||
+ if ((inl_pad + sizeof(struct mlx5_wqe_data_seg) >
+ mpw_room) ||
(length <= txq->inline_max_packet_sz &&
inl_pad + sizeof(inl_hdr) + length >
- mpw_room) ||
+ mpw_room) ||
(mpw.wqe->eseg.cs_flags != cs_flags))
max_wqe -= mlx5_empw_close(txq, &mpw);
}
if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
- if (unlikely(segs_n != 1)) {
- /* Fall back to legacy MPW.
- * A MPW session consumes 2 WQEs at most to
- * include MLX5_MPW_DSEG_MAX pointers.
- */
- if (unlikely(max_wqe < 2))
- break;
- mlx5_mpw_new(txq, &mpw, length);
- } else {
- /* In Enhanced MPW, inline as much as the budget
- * is allowed. The remaining space is to be
- * filled with dsegs. If the title WQEBB isn't
- * padded, it will have 2 dsegs there.
- */
- mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
- (max_inline ? max_inline :
- pkts_n * MLX5_WQE_DWORD_SIZE) +
- MLX5_WQE_SIZE);
- if (unlikely(max_wqe * MLX5_WQE_SIZE <
- mpw_room))
- break;
- /* Don't pad the title WQEBB to not waste WQ. */
- mlx5_empw_new(txq, &mpw, 0);
- mpw_room -= mpw.total_len;
- inl_pad = 0;
- do_inline =
- length <= txq->inline_max_packet_sz &&
- sizeof(inl_hdr) + length <= mpw_room &&
- !txq->mpw_hdr_dseg;
- }
+ /* In Enhanced MPW, inline as much as the budget
+ * allows. The remaining space is filled with dsegs.
+ * If the title WQEBB isn't padded, it will hold
+ * 2 dsegs there.
+ */
+ mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
+ (max_inline ? max_inline :
+ pkts_n * MLX5_WQE_DWORD_SIZE) +
+ MLX5_WQE_SIZE);
+ if (unlikely(max_wqe * MLX5_WQE_SIZE < mpw_room))
+ break;
+ /* Don't pad the title WQEBB, to avoid wasting WQ space. */
+ mlx5_empw_new(txq, &mpw, 0);
+ mpw_room -= mpw.total_len;
+ inl_pad = 0;
+ do_inline = length <= txq->inline_max_packet_sz &&
+ sizeof(inl_hdr) + length <= mpw_room &&
+ !txq->mpw_hdr_dseg;
mpw.wqe->eseg.cs_flags = cs_flags;
} else {
/* Evaluate whether the next packet can be inlined.
@@ -1450,41 +1382,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
(!txq->mpw_hdr_dseg ||
mpw.total_len >= MLX5_WQE_SIZE);
}
- /* Multi-segment packets must be alone in their MPW. */
- assert((segs_n == 1) || (mpw.pkts_n == 0));
- if (unlikely(mpw.state == MLX5_MPW_STATE_OPENED)) {
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
- length = 0;
-#endif
- do {
- volatile struct mlx5_wqe_data_seg *dseg;
-
- assert(buf);
- (*txq->elts)[elts_head++ & elts_m] = buf;
- dseg = mpw.data.dseg[mpw.pkts_n];
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- *dseg = (struct mlx5_wqe_data_seg){
- .byte_count = rte_cpu_to_be_32(
- DATA_LEN(buf)),
- .lkey = mlx5_tx_mb2mr(txq, buf),
- .addr = rte_cpu_to_be_64(addr),
- };
-#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
- length += DATA_LEN(buf);
-#endif
- buf = buf->next;
- ++j;
- ++mpw.pkts_n;
- } while (--segs_n);
- /* A multi-segmented packet takes one MPW session.
- * TODO: Pack more multi-segmented packets if possible.
- */
- mlx5_mpw_close(txq, &mpw);
- if (mpw.pkts_n < 3)
- max_wqe--;
- else
- max_wqe -= 2;
- } else if (do_inline) {
+ if (do_inline) {
/* Inline packet into WQE. */
unsigned int max;
@@ -1546,12 +1444,12 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
rte_prefetch2((void *)(addr +
n * RTE_CACHE_LINE_SIZE));
- naddr = rte_cpu_to_be_64(addr);
+ addr = rte_cpu_to_be_64(addr);
*dseg = (rte_v128u32_t) {
rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
- naddr,
- naddr >> 32,
+ addr,
+ addr >> 32,
};
mpw.data.raw = (volatile void *)(dseg + 1);
mpw.total_len += (inl_pad + sizeof(*dseg));
@@ -1581,7 +1479,9 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
txq->mpw_comp = txq->wqe_ci;
- txq->cq_pi++;
+#ifndef NDEBUG
+ ++txq->cq_pi;
+#endif
} else {
txq->elts_comp += j;
}
@@ -1591,8 +1491,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
#endif
if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
mlx5_empw_close(txq, &mpw);
- else if (mpw.state == MLX5_MPW_STATE_OPENED)
- mlx5_mpw_close(txq, &mpw);
/* Ring QP doorbell. */
mlx5_tx_dbrec(txq, mpw.wqe);
txq->elts_head = elts_head;
@@ -1600,6 +1498,47 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
/**
+ * DPDK callback for TX with Enhanced MPW support.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ uint16_t nb_tx = 0;
+
+ while (pkts_n > nb_tx) {
+ uint16_t n;
+ uint16_t ret;
+
+ n = txq_count_contig_multi_seg(&pkts[nb_tx], pkts_n - nb_tx);
+ if (n) {
+ ret = mlx5_tx_burst(dpdk_txq, &pkts[nb_tx], n);
+ if (!ret)
+ break;
+ nb_tx += ret;
+ }
+ n = txq_count_contig_single_seg(&pkts[nb_tx], pkts_n - nb_tx);
+ if (n) {
+ ret = txq_burst_empw(txq, &pkts[nb_tx], n);
+ if (!ret)
+ break;
+ nb_tx += ret;
+ }
+ }
+ return nb_tx;
+}
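
The callback above peels the burst into alternating contiguous runs: multi-segment packets go to the scalar mlx5_tx_burst() path, single-segment runs to txq_burst_empw(). A simplified, runnable model of that dispatch, with the transmit calls stubbed out and the early break on back-pressure omitted (not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Segment counts stand in for the mbuf chain lengths of a burst. */
static const unsigned int segs[6] = { 2, 3, 1, 1, 4, 1 };

/* Length of the contiguous run starting at 'from' that is multi-segment
 * (multi == 1) or single-segment (multi == 0). */
static uint16_t count_run(uint16_t from, uint16_t n, int multi)
{
	uint16_t i;

	for (i = 0; i < n; ++i)
		if ((segs[from + i] > 1) != multi)
			break;
	return i;
}

int main(void)
{
	uint16_t nb_tx = 0, pkts_n = 6;

	/* Same alternation as mlx5_tx_burst_empw(). */
	while (pkts_n > nb_tx) {
		uint16_t n;

		n = count_run(nb_tx, pkts_n - nb_tx, 1); /* multi-seg run */
		nb_tx += n; /* a real driver calls mlx5_tx_burst() here */
		n = count_run(nb_tx, pkts_n - nb_tx, 0); /* single-seg run */
		nb_tx += n; /* ... and txq_burst_empw() here */
	}
	assert(nb_tx == 6);
	return 0;
}
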
+
+/**
* Translate RX completion flags to packet type.
*
* @param[in] cqe
@@ -1702,6 +1641,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
return 0;
++rxq->cq_ci;
op_own = cqe->op_own;
+ rte_cio_rmb();
if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
volatile struct mlx5_mini_cqe8 (*mc)[8] =
(volatile struct mlx5_mini_cqe8 (*)[8])
@@ -1931,9 +1871,9 @@ skip:
return 0;
/* Update the consumer index. */
rxq->rq_ci = rq_ci >> sges_n;
- rte_io_wmb();
+ rte_cio_wmb();
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
- rte_io_wmb();
+ rte_cio_wmb();
*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment packets counter. */
@@ -2027,16 +1967,18 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv)
+priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
{
(void)priv;
+ (void)dev;
return -ENOTSUP;
}
int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv)
+priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
{
(void)priv;
+ (void)dev;
return -ENOTSUP;
}
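
The __attribute__((weak)) stubs above only resolve when no architecture-specific vectorized implementation is linked in; a strong definition from the vectorized sources overrides them at link time. The pattern in miniature, with hypothetical file and function names (not part of the patch):

/* generic.c: weak default, used when no vectorized file is linked in. */
int __attribute__((weak))
check_vec_support(void)
{
	return -1; /* stands in for -ENOTSUP */
}

/*
 * vec_impl.c (hypothetical), compiled only on capable targets, provides
 * the strong definition that the linker prefers:
 *
 *     int check_vec_support(void) { return 1; }
 */
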
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d34f3cc0..d7e89055 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#ifndef RTE_PMD_MLX5_RXTX_H_
@@ -114,8 +86,7 @@ struct mlx5_rxq_data {
unsigned int elts_n:4; /* Log 2 of Mbufs. */
unsigned int rss_hash:1; /* RSS hash result is enabled. */
unsigned int mark:1; /* Marked flow available on the queue. */
- unsigned int pending_err:1; /* CQE error needs to be handled. */
- unsigned int :14; /* Remaining bits. */
+ unsigned int :15; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t port_id;
@@ -185,13 +156,14 @@ struct mlx5_txq_data {
uint16_t elts_comp; /* Counter since last completion request. */
uint16_t mpw_comp; /* WQ index since last completion request. */
uint16_t cq_ci; /* Consumer index for completion queue. */
+#ifndef NDEBUG
uint16_t cq_pi; /* Producer index for completion queue. */
+#endif
uint16_t wqe_ci; /* Consumer index for work queue. */
uint16_t wqe_pi; /* Producer index for work queue. */
uint16_t elts_n:4; /* (*elts)[] length (in log2). */
uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
- uint16_t inline_en:1; /* When set inline is enabled. */
uint16_t tso_en:1; /* When set, hardware TSO is enabled. */
uint16_t tunnel_en:1;
/* When set, TX offloads for tunneled packets are supported. */
@@ -200,12 +172,12 @@ struct mlx5_txq_data {
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
uint16_t mr_cache_idx; /* Index of last hit entry. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
- uint32_t flags; /* Flags for Tx Queue. */
+ uint64_t offloads; /* Offloads for Tx Queue. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
- volatile void *bf_reg; /* Blueflame register. */
+ volatile void *bf_reg; /* Blueflame register remapped. */
struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */
struct rte_mbuf *(*elts)[]; /* TX elements. */
struct mlx5_txq_stats stats; /* TX queue counters. */
@@ -230,6 +202,7 @@ struct mlx5_txq_ctrl {
struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
struct mlx5_txq_data txq; /* Data path structure. */
off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
+ volatile void *bf_reg_orig; /* Blueflame register from verbs. */
};
/* mlx5_rxq.c */
@@ -252,6 +225,7 @@ int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *);
int mlx5_priv_rxq_ibv_verify(struct priv *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t,
uint16_t, unsigned int,
+ const struct rte_eth_rxconf *,
struct rte_mempool *);
struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t);
int mlx5_priv_rxq_release(struct priv *, uint16_t);
@@ -272,6 +246,8 @@ struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t,
uint64_t, uint16_t [], uint16_t);
int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *);
int mlx5_priv_hrxq_ibv_verify(struct priv *);
+uint64_t mlx5_priv_get_rx_port_offloads(struct priv *);
+uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *);
/* mlx5_txq.c */
@@ -292,6 +268,7 @@ int mlx5_priv_txq_release(struct priv *, uint16_t);
int mlx5_priv_txq_releasable(struct priv *, uint16_t);
int mlx5_priv_txq_verify(struct priv *);
void txq_alloc_elts(struct mlx5_txq_ctrl *);
+uint64_t mlx5_priv_get_tx_port_offloads(struct priv *);
/* mlx5_rxtx.c */
@@ -309,8 +286,8 @@ int mlx5_rx_descriptor_status(void *, uint16_t);
int mlx5_tx_descriptor_status(void *, uint16_t);
/* Vectorized version of mlx5_rxtx.c */
-int priv_check_raw_vec_tx_support(struct priv *);
-int priv_check_vec_tx_support(struct priv *);
+int priv_check_raw_vec_tx_support(struct priv *, struct rte_eth_dev *);
+int priv_check_vec_tx_support(struct priv *, struct rte_eth_dev *);
int rxq_check_vec_support(struct mlx5_rxq_data *);
int priv_check_vec_rx_support(struct priv *);
uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
@@ -548,23 +525,21 @@ mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
struct mlx5_mr *mr;
assert(i < RTE_DIM(txq->mp2mr));
- if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end >= addr))
+ if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end > addr))
return txq->mp2mr[i]->lkey;
for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i]->mr == NULL)) {
+ if (unlikely(txq->mp2mr[i] == NULL ||
+ txq->mp2mr[i]->mr == NULL)) {
/* Unknown MP, add a new MR for it. */
break;
}
if (txq->mp2mr[i]->start <= addr &&
- txq->mp2mr[i]->end >= addr) {
+ txq->mp2mr[i]->end > addr) {
assert(txq->mp2mr[i]->lkey != (uint32_t)-1);
- assert(rte_cpu_to_be_32(txq->mp2mr[i]->mr->lkey) ==
- txq->mp2mr[i]->lkey);
txq->mr_cache_idx = i;
return txq->mp2mr[i]->lkey;
}
}
- txq->mr_cache_idx = 0;
mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
/*
* Request the reference to use in this queue, the original one is
@@ -572,7 +547,13 @@ mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
*/
if (mr) {
rte_atomic32_inc(&mr->refcnt);
+ txq->mr_cache_idx = i >= RTE_DIM(txq->mp2mr) ? i - 1 : i;
return mr->lkey;
+ } else {
+ struct rte_mempool *mp = mlx5_tx_mb2mp(mb);
+
+ WARN("Failed to register mempool %p (%s)",
+ (void *)mp, mp->name);
}
return (uint32_t)-1;
}
@@ -594,7 +575,7 @@ mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
volatile uint64_t *src = ((volatile uint64_t *)wqe);
- rte_io_wmb();
+ rte_cio_wmb();
*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
/* Ensure ordering between DB record and BF copy. */
rte_wmb();
@@ -617,4 +598,89 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
}
+/**
+ * Convert mbuf checksum offload flags to WQE cs_flags.
+ *
+ * @param txq_data
+ * Pointer to the Tx queue.
+ * @param buf
+ * Pointer to the mbuf.
+ *
+ * @return
+ * The converted cs_flags.
+ */
+static __rte_always_inline uint8_t
+txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
+{
+ uint8_t cs_flags = 0;
+
+ /* Should we enable HW CKSUM offload? */
+ if (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
+ PKT_TX_OUTER_IP_CKSUM)) {
+ if (txq_data->tunnel_en &&
+ (buf->ol_flags &
+ (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
+ cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+ } else {
+ cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ }
+ }
+ return cs_flags;
+}
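
To make the conversion above concrete: a tunneled packet requesting inner checksums plus PKT_TX_OUTER_IP_CKSUM ends up with both inner flags and the outer L3 flag set. A self-checking sketch mirroring the same decision tree, with illustrative flag values rather than the real DPDK or PRM encodings (not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Illustrative flag values only. */
#define PKT_TX_TCP_CKSUM      (1ull << 0)
#define PKT_TX_OUTER_IP_CKSUM (1ull << 1)
#define PKT_TX_TUNNEL_VXLAN   (1ull << 2)

#define WQE_L3_CSUM       0x1
#define WQE_L4_CSUM       0x2
#define WQE_L3_INNER_CSUM 0x4
#define WQE_L4_INNER_CSUM 0x8

/* Mirrors the txq_ol_cksum_to_cs() decision tree above. */
static uint8_t ol_to_cs(int tunnel_en, uint64_t ol_flags)
{
	if (!(ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_OUTER_IP_CKSUM)))
		return 0;
	if (tunnel_en && (ol_flags & PKT_TX_TUNNEL_VXLAN)) {
		uint8_t cs = WQE_L3_INNER_CSUM | WQE_L4_INNER_CSUM;

		if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
			cs |= WQE_L3_CSUM;
		return cs;
	}
	return WQE_L3_CSUM | WQE_L4_CSUM;
}

int main(void)
{
	/* Tunneled packet: inner L3+L4 checksums plus the outer L3 flag. */
	assert(ol_to_cs(1, PKT_TX_TCP_CKSUM | PKT_TX_TUNNEL_VXLAN |
			PKT_TX_OUTER_IP_CKSUM) ==
	       (WQE_L3_INNER_CSUM | WQE_L4_INNER_CSUM | WQE_L3_CSUM));
	/* Plain packet: outer L3+L4 checksums. */
	assert(ol_to_cs(0, PKT_TX_TCP_CKSUM) ==
	       (WQE_L3_CSUM | WQE_L4_CSUM));
	return 0;
}
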
+
+/**
+ * Count the number of contiguous single segment packets.
+ *
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ *
+ * @return
+ * Number of contiguous single segment packets.
+ */
+static __rte_always_inline unsigned int
+txq_count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ unsigned int pos;
+
+ if (!pkts_n)
+ return 0;
+ /* Count the number of contiguous single segment packets. */
+ for (pos = 0; pos < pkts_n; ++pos)
+ if (NB_SEGS(pkts[pos]) > 1)
+ break;
+ return pos;
+}
+
+/**
+ * Count the number of contiguous multi-segment packets.
+ *
+ * @param pkts
+ * Pointer to array of packets.
+ * @param pkts_n
+ * Number of packets.
+ *
+ * @return
+ * Number of contiguous multi-segment packets.
+ */
+static __rte_always_inline unsigned int
+txq_count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ unsigned int pos;
+
+ if (!pkts_n)
+ return 0;
+ /* Count the number of contiguous multi-segment packets. */
+ for (pos = 0; pos < pkts_n; ++pos)
+ if (NB_SEGS(pkts[pos]) == 1)
+ break;
+ return pos;
+}
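
Together, these two counters let a caller walk a burst as alternating runs, which is exactly what mlx5_tx_burst_empw() does. For instance, a burst with segment counts {1, 1, 3, 2, 1} splits into a 2-packet single-segment run, a 2-packet multi-segment run, then one more single-segment packet. A self-contained check of those run lengths, with a simplified mbuf standing in for struct rte_mbuf (not part of the patch):

#include <assert.h>

/* Simplified stand-in for rte_mbuf/NB_SEGS(). */
struct mbuf { unsigned int nb_segs; };

static unsigned int count_single(struct mbuf **p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		if (p[i]->nb_segs > 1)
			break;
	return i;
}

static unsigned int count_multi(struct mbuf **p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		if (p[i]->nb_segs == 1)
			break;
	return i;
}

int main(void)
{
	struct mbuf m[5] = { {1}, {1}, {3}, {2}, {1} };
	struct mbuf *pkts[5] = { &m[0], &m[1], &m[2], &m[3], &m[4] };

	assert(count_single(pkts, 5) == 2);
	assert(count_multi(pkts + 2, 3) == 2);
	assert(count_single(pkts + 4, 1) == 1);
	return 0;
}
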
+
#endif /* RTE_PMD_MLX5_RXTX_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index ba6c8cef..b66c2916 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#include <assert.h>
@@ -68,31 +40,6 @@
#endif
/**
- * Count the number of continuous single segment packets.
- *
- * @param pkts
- * Pointer to array of packets.
- * @param pkts_n
- * Number of packets.
- *
- * @return
- * Number of continuous single segment packets.
- */
-static inline unsigned int
-txq_check_multiseg(struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- unsigned int pos;
-
- if (!pkts_n)
- return 0;
- /* Count the number of continuous single segment packets. */
- for (pos = 0; pos < pkts_n; ++pos)
- if (NB_SEGS(pkts[pos]) > 1)
- break;
- return pos;
-}
-
-/**
* Count the number of packets having the same ol_flags and calculate cs_flags.
*
* @param txq
@@ -123,24 +70,7 @@ txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
for (pos = 1; pos < pkts_n; ++pos)
if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
break;
- /* Should open another MPW session for the rest. */
- if (pkts[0]->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- const uint64_t is_tunneled =
- pkts[0]->ol_flags &
- (PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN);
-
- if (is_tunneled && txq->tunnel_en) {
- *cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
- MLX5_ETH_WQE_L4_INNER_CSUM;
- if (pkts[0]->ol_flags & PKT_TX_OUTER_IP_CKSUM)
- *cs_flags |= MLX5_ETH_WQE_L3_CSUM;
- } else {
- *cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
- }
- }
+ *cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]);
return pos;
}
@@ -202,15 +132,15 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t ret;
/* Transmit multi-seg packets in the head of pkts list. */
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
+ if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
NB_SEGS(pkts[nb_tx]) > 1)
nb_tx += txq_scatter_v(txq,
&pkts[nb_tx],
pkts_n - nb_tx);
n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
- n = txq_check_multiseg(&pkts[nb_tx], n);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+ if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ n = txq_count_contig_single_seg(&pkts[nb_tx], n);
+ if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
nb_tx += ret;
@@ -261,7 +191,6 @@ rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
rxq->stats.ipackets -= (pkts_n - n);
rxq->stats.ibytes -= err_bytes;
#endif
- rxq->pending_err = 0;
return n;
}
@@ -283,9 +212,10 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct mlx5_rxq_data *rxq = dpdk_rxq;
uint16_t nb_rx;
+ uint64_t err = 0;
- nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
- if (unlikely(rxq->pending_err))
+ nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
+ if (unlikely(err))
nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
return nb_rx;
}
@@ -295,24 +225,20 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
*
* @param priv
* Pointer to private structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_raw_vec_tx_support(struct priv *priv)
+priv_check_raw_vec_tx_support(__rte_unused struct priv *priv,
+ struct rte_eth_dev *dev)
{
- uint16_t i;
-
- /* All the configured queues should support. */
- for (i = 0; i < priv->txqs_n; ++i) {
- struct mlx5_txq_data *txq = (*priv->txqs)[i];
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
- !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
- break;
- }
- if (i != priv->txqs_n)
+ /* Raw vectorized Tx does not support any offloads. */
+ if (offloads)
return -ENOTSUP;
return 1;
}
@@ -322,17 +248,21 @@ priv_check_raw_vec_tx_support(struct priv *priv)
*
* @param priv
* Pointer to private structure.
+ * @param dev
+ * Pointer to rte_eth_dev structure.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_vec_tx_support(struct priv *priv)
+priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
{
- if (!priv->tx_vec_en ||
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
+ if (!priv->config.tx_vec_en ||
priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
- priv->mps != MLX5_MPW_ENHANCED ||
- priv->tso)
+ priv->config.mps != MLX5_MPW_ENHANCED ||
+ offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
return -ENOTSUP;
return 1;
}
@@ -352,7 +282,7 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq)
struct mlx5_rxq_ctrl *ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
+ if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
return -ENOTSUP;
return 1;
}
@@ -371,7 +301,7 @@ priv_check_vec_rx_support(struct priv *priv)
{
uint16_t i;
- if (!priv->rx_vec_en)
+ if (!priv->config.rx_vec_en)
return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 1f08ed0b..44856bbf 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
@@ -40,6 +12,18 @@
#include "mlx5_autoconf.h"
#include "mlx5_prm.h"
+/* HW checksum offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
+ (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+
+/* HW offload capabilities of vectorized Tx. */
+#define MLX5_VEC_TX_OFFLOAD_CAP \
+ (MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
/*
* Compile time sanity check for vectorized functions.
*/
@@ -123,7 +107,7 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
elts_idx = rxq->rq_ci & q_mask;
for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
- rte_io_wmb();
+ rte_cio_wmb();
*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}
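
The rte_io_wmb() to rte_cio_wmb() change relaxes the barrier to the coherent-I/O variant, which is cheaper on arm64 yet still orders the elts stores before the doorbell record update. A hedged sketch of the pattern (names are illustrative, not the driver's):

    #include <stdint.h>
    #include <rte_atomic.h>
    #include <rte_byteorder.h>

    static inline void
    replenish_and_ring(uint32_t *ring, uint32_t slot, uint32_t val,
                       volatile uint32_t *db_rec, uint32_t new_ci)
    {
        ring[slot] = val;                   /* 1. fill descriptors/elts */
        rte_cio_wmb();                      /* 2. order stores vs. doorbell */
        *db_rec = rte_cpu_to_be_32(new_ci); /* 3. HW may now fetch */
    }
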
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index c721d80e..bbe1818e 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
@@ -135,6 +107,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
if (unlikely(!pkts_n))
return 0;
for (n = 0; n < pkts_n; ++n) {
@@ -149,7 +123,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
11, 10, 9, 8, /* bswap32 */
12, 13, 14, 15
};
- uint8_t cs_flags = 0;
+ uint8_t cs_flags;
uint16_t max_elts;
uint16_t max_wqe;
uint8x16_t *t_wqe;
@@ -168,22 +142,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
break;
wqe = &((volatile struct mlx5_wqe64 *)
txq->wqes)[wqe_ci & wq_mask].hdr;
- if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- const uint64_t is_tunneled =
- buf->ol_flags & (PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN);
-
- if (is_tunneled && txq->tunnel_en) {
- cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
- MLX5_ETH_WQE_L4_INNER_CSUM;
- if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
- cs_flags |= MLX5_ETH_WQE_L3_CSUM;
- } else {
- cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
- }
- }
+ cs_flags = txq_ol_cksum_to_cs(txq, buf);
/* Title WQEBB pointer. */
t_wqe = (uint8x16_t *)wqe;
dseg = (uint8_t *)(wqe + 1);
@@ -220,7 +179,9 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
+#ifndef NDEBUG
++txq->cq_pi;
+#endif
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
@@ -233,7 +194,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
* Send burst of packets with Enhanced MPW. If it encounters a multi-seg packet,
* it returns so the packet can be processed by txq_scatter_v(). All packets in
* the pkts list should be single-segment packets having the same offload flags.
- * This must be checked by txq_check_multiseg() and txq_calc_offload().
+ * This must be checked by txq_count_contig_single_seg() and txq_calc_offload().
*
* @param txq
* Pointer to TX queue structure.
@@ -284,6 +245,8 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
if (unlikely(!pkts_n))
@@ -321,7 +284,9 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
} else {
/* Request a completion. */
txq->elts_comp = 0;
+#ifndef NDEBUG
++txq->cq_pi;
+#endif
comp_req = 8;
}
/* Fill CTRL in the header. */
@@ -590,11 +555,15 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
if (rxq->mark) {
const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
- const uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+ uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+ uint32x4_t invalid_mask;
/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
- ol_flags = vorrq_u32(ol_flags, vbicq_u32(fdir_flags,
- vceqzq_u32(flow_tag)));
+ invalid_mask = vceqzq_u32(flow_tag);
+ ol_flags = vorrq_u32(ol_flags,
+ vbicq_u32(fdir_flags, invalid_mask));
+ /* Mask out invalid entries. */
+ fdir_id_flags = vbicq_u32(fdir_id_flags, invalid_mask);
/* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
ol_flags = vorrq_u32(ol_flags,
vbicq_u32(fdir_id_flags,
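
The fix above is subtle in vector form: the invalid-entry mask must be applied to the PKT_RX_FDIR_ID flag vector, not to the flow tags themselves, which are still needed for the default-mark comparison. A scalar equivalent with stand-in flag values:

    #include <stdint.h>

    #define F_FDIR        (1u << 0)   /* stand-ins, not the DPDK values */
    #define F_FDIR_ID     (1u << 1)
    #define MARK_DEFAULT  0xffffffu

    static uint32_t
    fdir_flags_from_tag(uint32_t flow_tag)
    {
        uint32_t ol = 0;

        if (flow_tag)                             /* valid entry */
            ol |= F_FDIR;
        if (flow_tag && flow_tag != MARK_DEFAULT) /* ID also valid */
            ol |= F_FDIR_ID;
        return ol;
    }
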
@@ -665,12 +634,16 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
* Array to store received packets.
* @param pkts_n
* Maximum number of packets in array.
+ * @param[out] err
+ * Pointer to a flag. Set to a non-zero value if the pkts array has at
+ * least one error packet to handle.
*
* @return
* Number of packets received including errors (<= pkts_n).
*/
static inline uint16_t
-rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
+ uint64_t *err)
{
const uint16_t q_n = 1 << rxq->cqe_n;
const uint16_t q_mask = q_n - 1;
@@ -813,6 +786,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16x4_t mask;
uint16x4_t byte_cnt;
uint32x4_t ptype_info, flow_tag;
+ register uint64x2_t c0, c1, c2, c3;
uint8_t *p0, *p1, *p2, *p3;
uint8_t *e0 = (void *)&elts[pos]->pkt_len;
uint8_t *e1 = (void *)&elts[pos + 1]->pkt_len;
@@ -829,6 +803,16 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
p1 = p0 + (pkts_n - pos > 1) * sizeof(struct mlx5_cqe);
p2 = p1 + (pkts_n - pos > 2) * sizeof(struct mlx5_cqe);
p3 = p2 + (pkts_n - pos > 3) * sizeof(struct mlx5_cqe);
+ /* B.0 (CQE 3) load a block having op_own. */
+ c3 = vld1q_u64((uint64_t *)(p3 + 48));
+ /* B.0 (CQE 2) load a block having op_own. */
+ c2 = vld1q_u64((uint64_t *)(p2 + 48));
+ /* B.0 (CQE 1) load a block having op_own. */
+ c1 = vld1q_u64((uint64_t *)(p1 + 48));
+ /* B.0 (CQE 0) load a block having op_own. */
+ c0 = vld1q_u64((uint64_t *)(p0 + 48));
+ /* Synchronize for loading the rest of blocks. */
+ rte_cio_rmb();
/* Prefetch next 4 CQEs. */
if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP;
@@ -838,50 +822,46 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
rte_prefetch_non_temporal(&cq[next + 3]);
}
__asm__ volatile (
- /* B.1 (CQE 3) load a block having op_own. */
- "ld1 {v19.16b}, [%[p3]] \n\t"
- "sub %[p3], %[p3], #48 \n\t"
- /* B.2 (CQE 3) load the rest blocks. */
+ /* B.1 (CQE 3) load the rest of blocks. */
"ld1 {v16.16b - v18.16b}, [%[p3]] \n\t"
+ /* B.2 (CQE 3) move the block having op_own. */
+ "mov v19.16b, %[c3].16b \n\t"
/* B.3 (CQE 3) extract 16B fields. */
"tbl v23.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+ /* B.1 (CQE 2) load the rest of blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p2]] \n\t"
/* B.4 (CQE 3) adjust CRC length. */
"sub v23.8h, v23.8h, %[crc_adj].8h \n\t"
- /* B.1 (CQE 2) load a block having op_own. */
- "ld1 {v19.16b}, [%[p2]] \n\t"
- "sub %[p2], %[p2], #48 \n\t"
/* C.1 (CQE 3) generate final structure for mbuf. */
"tbl v15.16b, {v23.16b}, %[mb_shuf_m].16b \n\t"
- /* B.2 (CQE 2) load the rest blocks. */
- "ld1 {v16.16b - v18.16b}, [%[p2]] \n\t"
+ /* B.2 (CQE 2) move the block having op_own. */
+ "mov v19.16b, %[c2].16b \n\t"
/* B.3 (CQE 2) extract 16B fields. */
"tbl v22.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+ /* B.1 (CQE 1) load the rest of blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p1]] \n\t"
/* B.4 (CQE 2) adjust CRC length. */
"sub v22.8h, v22.8h, %[crc_adj].8h \n\t"
- /* B.1 (CQE 1) load a block having op_own. */
- "ld1 {v19.16b}, [%[p1]] \n\t"
- "sub %[p1], %[p1], #48 \n\t"
/* C.1 (CQE 2) generate final structure for mbuf. */
"tbl v14.16b, {v22.16b}, %[mb_shuf_m].16b \n\t"
- /* B.2 (CQE 1) load the rest blocks. */
- "ld1 {v16.16b - v18.16b}, [%[p1]] \n\t"
+ /* B.2 (CQE 1) move the block having op_own. */
+ "mov v19.16b, %[c1].16b \n\t"
/* B.3 (CQE 1) extract 16B fields. */
"tbl v21.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+ /* B.1 (CQE 0) load the rest of blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p0]] \n\t"
/* B.4 (CQE 1) adjust CRC length. */
"sub v21.8h, v21.8h, %[crc_adj].8h \n\t"
- /* B.1 (CQE 0) load a block having op_own. */
- "ld1 {v19.16b}, [%[p0]] \n\t"
- "sub %[p0], %[p0], #48 \n\t"
/* C.1 (CQE 1) generate final structure for mbuf. */
"tbl v13.16b, {v21.16b}, %[mb_shuf_m].16b \n\t"
- /* B.2 (CQE 0) load the rest blocks. */
- "ld1 {v16.16b - v18.16b}, [%[p0]] \n\t"
+ /* B.2 (CQE 0) move the block having op_own. */
+ "mov v19.16b, %[c0].16b \n\t"
+ /* A.1 load mbuf pointers. */
+ "ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t"
/* B.3 (CQE 0) extract 16B fields. */
"tbl v20.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
/* B.4 (CQE 0) adjust CRC length. */
"sub v20.8h, v20.8h, %[crc_adj].8h \n\t"
- /* A.1 load mbuf pointers. */
- "ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t"
/* D.1 extract op_own byte. */
"tbl %[op_own].8b, {v20.16b - v23.16b}, %[owner_shuf_m].8b \n\t"
/* C.2 (CQE 3) adjust flow mark. */
@@ -916,9 +896,9 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
[byte_cnt]"=&w"(byte_cnt),
[ptype_info]"=&w"(ptype_info),
[flow_tag]"=&w"(flow_tag)
- :[p3]"r"(p3 + 48), [p2]"r"(p2 + 48),
- [p1]"r"(p1 + 48), [p0]"r"(p0 + 48),
+ :[p3]"r"(p3), [p2]"r"(p2), [p1]"r"(p1), [p0]"r"(p0),
[e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
+ [c3]"w"(c3), [c2]"w"(c2), [c1]"w"(c1), [c0]"w"(c0),
[elts_p]"r"(elts_p),
[pkts_p]"r"(pkts_p),
[cqe_shuf_m]"w"(cqe_shuf_m),
@@ -970,8 +950,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
opcode = vceq_u16(resp_err_check, opcode);
opcode = vbic_u16(opcode, invalid_mask);
/* D.4 mark if any error is set */
- rxq->pending_err |=
- !!vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
+ *err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
/* C.4 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
opcode, &elts[pos]);
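
The reordered NEON sequence loads each CQE's op_own block first, then issues rte_cio_rmb() before loading the remaining blocks, so ownership is validated before any data that depends on it is read. A scalar sketch of the same ordering rule, with a stand-in CQE layout:

    #include <stdint.h>
    #include <string.h>
    #include <rte_atomic.h>

    struct cqe_sketch {          /* stand-in for the 64B mlx5 CQE */
        uint8_t payload[63];
        uint8_t op_own;          /* ownership byte, written last by HW */
    };

    /* cqe points into DMA-shared completion queue memory. */
    static int
    cqe_consume(const struct cqe_sketch *cqe, struct cqe_sketch *out,
                uint8_t sw_owner)
    {
        if ((cqe->op_own & 1) != sw_owner)
            return -1;                    /* CQE not owned by SW yet */
        rte_cio_rmb();                    /* payload loads stay below */
        memcpy(out, cqe, sizeof(*out));
        return 0;
    }
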
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 2b9f1601..c088bcb5 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
@@ -135,6 +107,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
if (unlikely(!pkts_n))
return 0;
for (n = 0; n < pkts_n; ++n) {
@@ -148,7 +122,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
8, 9, 10, 11, /* bswap32 */
4, 5, 6, 7, /* bswap32 */
0, 1, 2, 3 /* bswap32 */);
- uint8_t cs_flags = 0;
+ uint8_t cs_flags;
uint16_t max_elts;
uint16_t max_wqe;
__m128i *t_wqe, *dseg;
@@ -170,22 +144,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
}
wqe = &((volatile struct mlx5_wqe64 *)
txq->wqes)[wqe_ci & wq_mask].hdr;
- if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- const uint64_t is_tunneled =
- buf->ol_flags & (PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN);
-
- if (is_tunneled && txq->tunnel_en) {
- cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
- MLX5_ETH_WQE_L4_INNER_CSUM;
- if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
- cs_flags |= MLX5_ETH_WQE_L3_CSUM;
- } else {
- cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
- }
- }
+ cs_flags = txq_ol_cksum_to_cs(txq, buf);
/* Title WQEBB pointer. */
t_wqe = (__m128i *)wqe;
dseg = (__m128i *)(wqe + 1);
@@ -221,7 +180,9 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
+#ifndef NDEBUG
++txq->cq_pi;
+#endif
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
@@ -234,7 +195,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
* Send burst of packets with Enhanced MPW. If it encounters a multi-seg packet,
* it returns so the packet can be processed by txq_scatter_v(). All packets in
* the pkts list should be single-segment packets having the same offload flags.
- * This must be checked by txq_check_multiseg() and txq_calc_offload().
+ * This must be checked by txq_count_contig_single_seg() and txq_calc_offload().
*
* @param txq
* Pointer to TX queue structure.
@@ -283,6 +244,8 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
@@ -322,7 +285,9 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
} else {
/* Request a completion. */
txq->elts_comp = 0;
+#ifndef NDEBUG
++txq->cq_pi;
+#endif
comp_req = 8;
}
/* Fill CTRL in the header. */
@@ -591,7 +556,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
_mm_set_epi32(0xffffff00, 0xffffff00,
0xffffff00, 0xffffff00);
const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
- const __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
+ __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
__m128i flow_tag, invalid_mask;
flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
@@ -601,7 +566,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
_mm_andnot_si128(invalid_mask,
fdir_flags));
/* Mask out invalid entries. */
- flow_tag = _mm_andnot_si128(invalid_mask, flow_tag);
+ fdir_id_flags = _mm_andnot_si128(invalid_mask, fdir_id_flags);
/* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
ol_flags = _mm_or_si128(ol_flags,
_mm_andnot_si128(
@@ -669,12 +634,16 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
* Array to store received packets.
* @param pkts_n
* Maximum number of packets in array.
+ * @param[out] err
+ * Pointer to a flag. Set to a non-zero value if the pkts array has at
+ * least one error packet to handle.
*
* @return
* Number of packets received including errors (<= pkts_n).
*/
static inline uint16_t
-rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
+ uint64_t *err)
{
const uint16_t q_n = 1 << rxq->cqe_n;
const uint16_t q_mask = q_n - 1;
@@ -836,7 +805,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* B.2 copy mbuf pointers. */
_mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
_mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);
- rte_compiler_barrier();
+ rte_cio_rmb();
/* C.1 load remained CQE data and extract necessary fields. */
cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
@@ -936,7 +905,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
opcode = _mm_packs_epi32(opcode, zero);
opcode = _mm_andnot_si128(invalid_mask, opcode);
/* D.4 mark if any error is set */
- rxq->pending_err |= !!_mm_cvtsi128_si64(opcode);
+ *err |= _mm_cvtsi128_si64(opcode);
/* D.5 fill in mbuf - rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (rxq->hw_timestamp) {
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index 5cd1ab80..61c1a4a5 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -1,34 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2016 6WIND S.A.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
*/
+
#define _GNU_SOURCE
#include <sys/types.h>
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 5e225d37..378472a7 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -1,40 +1,12 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#include <linux/sockios.h>
#include <linux/ethtool.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_malloc.h>
@@ -122,6 +94,22 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
.dpdk_name = "rx_out_of_buffer",
.ctr_name = "out_of_buffer",
},
+ {
+ .dpdk_name = "tx_packets_phy",
+ .ctr_name = "tx_packets_phy",
+ },
+ {
+ .dpdk_name = "rx_packets_phy",
+ .ctr_name = "rx_packets_phy",
+ },
+ {
+ .dpdk_name = "tx_bytes_phy",
+ .ctr_name = "tx_bytes_phy",
+ },
+ {
+ .dpdk_name = "rx_bytes_phy",
+ .ctr_name = "rx_bytes_phy",
+ },
};
static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
@@ -143,11 +131,9 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats)
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int i;
struct ifreq ifr;
- unsigned int stats_sz = (xstats_ctrl->stats_n * sizeof(uint64_t)) +
- sizeof(struct ethtool_stats);
- struct ethtool_stats et_stats[(stats_sz + (
- sizeof(struct ethtool_stats) - 1)) /
- sizeof(struct ethtool_stats)];
+ unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t);
+ unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
+ struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;
et_stats->cmd = ETHTOOL_GSTATS;
et_stats->n_stats = xstats_ctrl->stats_n;
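
The rewritten buffer sizing above replaces an over-sized VLA of whole ethtool_stats structs with a byte buffer of exactly header plus payload, aliased back to the struct type. A sketch of the pattern with a stand-in header type:

    #include <stdint.h>
    #include <string.h>

    struct stats_hdr {        /* stand-in for struct ethtool_stats */
        uint32_t cmd;
        uint32_t n_stats;
        uint64_t data[];      /* flexible array member */
    };

    static void
    build_request(unsigned int n_stats)
    {
        unsigned char buf[sizeof(struct stats_hdr) +
                          n_stats * sizeof(uint64_t)];
        struct stats_hdr *req = (struct stats_hdr *)buf;

        memset(buf, 0, sizeof(buf));
        req->cmd = 0x1d;       /* ETHTOOL_GSTATS, shown for illustration */
        req->n_stats = n_stats;
        /* buf now has room for the kernel to return n_stats counters. */
    }
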
@@ -321,7 +307,7 @@ priv_xstats_reset(struct priv *priv)
int
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
struct rte_eth_stats tmp = {0};
unsigned int i;
unsigned int idx;
@@ -427,7 +413,7 @@ int
mlx5_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *stats, unsigned int n)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
int ret = xstats_n;
if (n >= xstats_n && stats) {
@@ -457,7 +443,7 @@ mlx5_xstats_get(struct rte_eth_dev *dev,
void
mlx5_xstats_reset(struct rte_eth_dev *dev)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
int stats_n;
@@ -489,7 +475,7 @@ int
mlx5_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, unsigned int n)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
if (n >= xstats_n && xstats_names) {
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 5de2d026..f5711a99 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1,39 +1,12 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
+
#include <unistd.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
@@ -64,8 +37,11 @@ priv_txq_start(struct priv *priv)
if (!txq_ctrl)
continue;
- LIST_FOREACH(mr, &priv->mr, next)
+ LIST_FOREACH(mr, &priv->mr, next) {
priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
+ if (idx == MLX5_PMD_TX_MP_CACHE)
+ break;
+ }
txq_alloc_elts(txq_ctrl);
txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
if (!txq_ctrl->ibv) {
@@ -73,10 +49,13 @@ priv_txq_start(struct priv *priv)
goto error;
}
}
- return -ret;
+ ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd);
+ if (ret)
+ goto error;
+ return ret;
error:
priv_txq_stop(priv);
- return -ret;
+ return ret;
}
static void
@@ -132,9 +111,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
struct mlx5_mr *mr = NULL;
int err;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
-
dev->data->dev_started = 1;
priv_lock(priv);
err = priv_flow_create_drop_queue(priv);
@@ -151,38 +127,29 @@ mlx5_dev_start(struct rte_eth_dev *dev)
(void *)dev, strerror(err));
goto error;
}
- /* Update send callback. */
- priv_dev_select_tx_function(priv, dev);
err = priv_rxq_start(priv);
if (err) {
ERROR("%p: RXQ allocation failed: %s",
(void *)dev, strerror(err));
goto error;
}
- /* Update receive callback. */
- priv_dev_select_rx_function(priv, dev);
- err = priv_dev_traffic_enable(priv, dev);
- if (err) {
- ERROR("%p: an error occurred while configuring control flows:"
- " %s",
- (void *)priv, strerror(err));
- goto error;
- }
- err = priv_flow_start(priv, &priv->flows);
- if (err) {
- ERROR("%p: an error occurred while configuring flows:"
- " %s",
- (void *)priv, strerror(err));
- goto error;
- }
err = priv_rx_intr_vec_enable(priv);
if (err) {
ERROR("%p: RX interrupt vector creation failed",
(void *)priv);
goto error;
}
- priv_dev_interrupt_handler_install(priv, dev);
priv_xstats_init(priv);
+ /* Update link status and Tx/Rx callbacks for the first time. */
+ memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
+ INFO("Forcing port %u link to be up", dev->data->port_id);
+ err = priv_force_link_status_change(priv, ETH_LINK_UP);
+ if (err) {
+ DEBUG("Failed to set port %u link to be up",
+ dev->data->port_id);
+ goto error;
+ }
+ priv_dev_interrupt_handler_install(priv, dev);
priv_unlock(priv);
return 0;
error:
@@ -196,7 +163,7 @@ error:
priv_rxq_stop(priv);
priv_flow_delete_drop_queue(priv);
priv_unlock(priv);
- return -err;
+ return err;
}
/**
@@ -213,9 +180,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
- if (mlx5_is_secondary())
- return;
-
priv_lock(priv);
dev->data->dev_started = 0;
/* Prevent crashes when queues are still in use. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 9c5860ff..ed1c713e 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#include <stddef.h>
@@ -51,7 +23,7 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include "mlx5_utils.h"
@@ -59,6 +31,7 @@
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
+#include "mlx5_glue.h"
/**
* Allocate TX queue elements.
@@ -116,6 +89,63 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
}
/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Tx offloads.
+ */
+uint64_t
+mlx5_priv_get_tx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT);
+ struct mlx5_dev_config *config = &priv->config;
+
+ if (config->hw_csum)
+ offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ if (config->tso)
+ offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (config->tunnel_en) {
+ if (config->hw_csum)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (config->tso)
+ offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ }
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param offloads
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * 1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+ uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
+ uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);
+
+ /* There are no Tx offloads that apply to a single queue only. */
+ if ((offloads & port_supp_offloads) != offloads)
+ return 0;
+ if ((port_offloads ^ offloads) & port_supp_offloads)
+ return 0;
+ return 1;
+}
+
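
The two bitwise checks read tersely; in words, queue offloads must be a subset of what the port supports, and within that supported mask they must agree with the port-level configuration. A worked example with illustrative bits:

    #include <stdint.h>

    static int
    queue_offloads_ok(uint64_t queue, uint64_t port, uint64_t supported)
    {
        if ((queue & supported) != queue)  /* unsupported bit requested */
            return 0;
        if ((port ^ queue) & supported)    /* disagrees with port config */
            return 0;
        return 1;
    }
    /* With supported = 0x7 and port = 0x5: queue = 0x5 passes, while
     * queue = 0x1 fails the second check (bit 2 differs within 0x7). */
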
+/**
* DPDK callback to configure a TX queue.
*
* @param dev
@@ -142,10 +172,21 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
container_of(txq, struct mlx5_txq_ctrl, txq);
int ret = 0;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
-
priv_lock(priv);
+ /*
+ * Don't verify port offloads for applications which
+ * use the old API.
+ */
+ if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+ !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
+ ret = ENOTSUP;
+ ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ mlx5_priv_get_tx_port_offloads(priv));
+ goto out;
+ }
if (desc <= MLX5_TX_COMP_THRESH) {
WARN("%p: number of descriptors requested for TX queue %u"
" must be higher than MLX5_TX_COMP_THRESH, using"
@@ -203,9 +244,6 @@ mlx5_tx_queue_release(void *dpdk_txq)
struct priv *priv;
unsigned int i;
- if (mlx5_is_secondary())
- return;
-
if (txq == NULL)
return;
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
@@ -223,7 +261,9 @@ mlx5_tx_queue_release(void *dpdk_txq)
/**
- * Map locally UAR used in Tx queues for BlueFlame doorbell.
+ * Mmap TX UAR (HW doorbell) pages into the reserved UAR address space.
+ * Both primary and secondary processes mmap so that the UAR addresses
+ * are consistent across processes.
*
* @param[in] priv
* Pointer to private structure.
@@ -240,11 +280,14 @@ priv_tx_uar_remap(struct priv *priv, int fd)
uintptr_t pages[priv->txqs_n];
unsigned int pages_n = 0;
uintptr_t uar_va;
+ uintptr_t off;
void *addr;
+ void *ret;
struct mlx5_txq_data *txq;
struct mlx5_txq_ctrl *txq_ctrl;
int already_mapped;
size_t page_size = sysconf(_SC_PAGESIZE);
+ int r;
memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
/*
@@ -253,10 +296,14 @@ priv_tx_uar_remap(struct priv *priv, int fd)
* Ref to libmlx5 function: mlx5_init_context()
*/
for (i = 0; i != priv->txqs_n; ++i) {
+ if (!(*priv->txqs)[i])
+ continue;
txq = (*priv->txqs)[i];
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
- uar_va = (uintptr_t)txq_ctrl->txq.bf_reg;
- uar_va = RTE_ALIGN_FLOOR(uar_va, page_size);
+ /* UAR addr from Verbs, used to find duplicates and in-page offset. */
+ uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
+ off = uar_va & (page_size - 1); /* offset in page. */
+ uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); /* page addr. */
already_mapped = 0;
for (j = 0; j != pages_n; ++j) {
if (pages[j] == uar_va) {
@@ -264,21 +311,54 @@ priv_tx_uar_remap(struct priv *priv, int fd)
break;
}
}
- if (already_mapped)
- continue;
- pages[pages_n++] = uar_va;
- addr = mmap((void *)uar_va, page_size,
- PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
- txq_ctrl->uar_mmap_offset);
- if (addr != (void *)uar_va) {
- ERROR("call to mmap failed on UAR for txq %d\n", i);
- return -1;
+ /* new address in reserved UAR address space. */
+ addr = RTE_PTR_ADD(priv->uar_base,
+ uar_va & (MLX5_UAR_SIZE - 1));
+ if (!already_mapped) {
+ pages[pages_n++] = uar_va;
+ /* Fixed mmap to the specified address in the
+ * reserved address space.
+ */
+ ret = mmap(addr, page_size,
+ PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
+ txq_ctrl->uar_mmap_offset);
+ if (ret != addr) {
+ /* A fixed mmap must return the requested address. */
+ ERROR("call to mmap failed on UAR for txq %d\n",
+ i);
+ r = ENXIO;
+ return r;
+ }
}
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
+ txq_ctrl->txq.bf_reg = RTE_PTR_ADD((void *)addr, off);
+ else
+ assert(txq_ctrl->txq.bf_reg ==
+ RTE_PTR_ADD((void *)addr, off));
}
return 0;
}
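
The remap arithmetic above splits each Verbs-provided UAR address into a page base and an in-page offset, then rebases the page into the reserved window so that every process computes the same doorbell address. A sketch of the arithmetic (window size assumed to be a power of two, as MLX5_UAR_SIZE is):

    #include <stdint.h>
    #include <stddef.h>

    static void *
    rebase_uar(uintptr_t uar_va, uintptr_t reserved_base,
               size_t page_size, size_t window_size)
    {
        uintptr_t off  = uar_va & (page_size - 1);  /* in-page offset */
        uintptr_t page = uar_va & ~(page_size - 1); /* page address */
        /* Same source page -> same slot in the reserved window, so
         * primary and secondary processes agree on the address. */
        uintptr_t slot = page & (window_size - 1);

        return (void *)(reserved_base + slot + off);
    }
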
/**
+ * Check if the burst function is using eMPW.
+ *
+ * @param tx_pkt_burst
+ * Tx burst function pointer.
+ *
+ * @return
+ * 1 if the burst function is using eMPW, 0 otherwise.
+ */
+static int
+is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
+{
+ if (tx_pkt_burst == mlx5_tx_burst_raw_vec ||
+ tx_pkt_burst == mlx5_tx_burst_vec ||
+ tx_pkt_burst == mlx5_tx_burst_empw)
+ return 1;
+ return 0;
+}
+
+/**
* Create the Tx queue Verbs object.
*
* @param priv
@@ -308,9 +388,12 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
struct mlx5dv_cq cq_info;
struct mlx5dv_obj obj;
const int desc = 1 << txq_data->elts_n;
+ eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
int ret = 0;
assert(txq_data);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
+ priv->verbs_alloc_ctx.obj = txq_ctrl;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
goto error;
@@ -322,9 +405,9 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
};
cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
- if (priv->mps == MLX5_MPW_ENHANCED)
+ if (is_empw_burst_func(tx_pkt_burst))
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
- tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
+ tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
ERROR("%p: CQ creation failure", (void *)txq_ctrl);
goto error;
@@ -359,13 +442,13 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
.pd = priv->pd,
.comp_mask = IBV_QP_INIT_ATTR_PD,
};
- if (txq_data->inline_en)
+ if (txq_data->max_inline)
attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
if (txq_data->tso_en) {
attr.init.max_tso_header = txq_ctrl->max_tso_header;
attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
}
- tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
+ tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
ERROR("%p: QP creation failure", (void *)txq_ctrl);
goto error;
@@ -376,7 +459,8 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
/* Primary port number. */
.port_num = priv->port
};
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT));
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
+ (IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
goto error;
@@ -384,13 +468,13 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
attr.mod = (struct ibv_qp_attr){
.qp_state = IBV_QPS_RTR
};
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
goto error;
}
attr.mod.qp_state = IBV_QPS_RTS;
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
goto error;
@@ -405,7 +489,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
obj.cq.out = &cq_info;
obj.qp.in = tmpl.qp;
obj.qp.out = &qp;
- ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
if (ret != 0)
goto error;
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
@@ -418,13 +502,15 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
txq_data->wqes = qp.sq.buf;
txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
- txq_data->bf_reg = qp.bf.reg;
+ txq_ctrl->bf_reg_orig = qp.bf.reg;
txq_data->cq_db = cq_info.dbrec;
txq_data->cqes =
(volatile struct mlx5_cqe (*)[])
(uintptr_t)cq_info.buf;
txq_data->cq_ci = 0;
+#ifndef NDEBUG
txq_data->cq_pi = 0;
+#endif
txq_data->wqe_ci = 0;
txq_data->wqe_pi = 0;
txq_ibv->qp = tmpl.qp;
@@ -439,12 +525,14 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
(void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return txq_ibv;
error:
if (tmpl.cq)
- claim_zero(ibv_destroy_cq(tmpl.cq));
+ claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
if (tmpl.qp)
- claim_zero(ibv_destroy_qp(tmpl.qp));
+ claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return NULL;
}
@@ -497,8 +585,8 @@ mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
(void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
- claim_zero(ibv_destroy_qp(txq_ibv->qp));
- claim_zero(ibv_destroy_cq(txq_ibv->cq));
+ claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
+ claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
LIST_REMOVE(txq_ibv, next);
rte_free(txq_ibv);
return 0;
@@ -545,84 +633,73 @@ mlx5_priv_txq_ibv_verify(struct priv *priv)
}
/**
- * Create a DPDK Tx queue.
+ * Set Tx queue parameters from device configuration.
*
- * @param priv
- * Pointer to private structure.
- * @param idx
- * TX queue index.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
- *
- * @return
- * A DPDK queue object on success.
+ * @param txq_ctrl
+ * Pointer to Tx queue control structure.
*/
-struct mlx5_txq_ctrl*
-mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket,
- const struct rte_eth_txconf *conf)
+static void
+txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
+ struct priv *priv = txq_ctrl->priv;
+ struct mlx5_dev_config *config = &priv->config;
const unsigned int max_tso_inline =
((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
- struct mlx5_txq_ctrl *tmpl;
-
- tmpl = rte_calloc_socket("TXQ", 1,
- sizeof(*tmpl) +
- desc * sizeof(struct rte_mbuf *),
- 0, socket);
- if (!tmpl)
- return NULL;
- assert(desc > MLX5_TX_COMP_THRESH);
- tmpl->txq.flags = conf->txq_flags;
- tmpl->priv = priv;
- tmpl->socket = socket;
- tmpl->txq.elts_n = log2above(desc);
- if (priv->mps == MLX5_MPW_ENHANCED)
- tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
- /* MRs will be registered in mp2mr[] later. */
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.orig_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.orig_attr.max_sge);
- if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+ unsigned int txq_inline;
+ unsigned int txqs_inline;
+ unsigned int inline_max_packet_sz;
+ eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
+ int is_empw_func = is_empw_burst_func(tx_pkt_burst);
+ int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);
+
+ txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txq_inline;
+ txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
+ 0 : config->txqs_inline;
+ inline_max_packet_sz =
+ (config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
+ 0 : config->inline_max_packet_sz;
+ if (is_empw_func) {
+ if (config->txq_inline == MLX5_ARG_UNSET)
+ txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
+ if (config->txqs_inline == MLX5_ARG_UNSET)
+ txqs_inline = MLX5_EMPW_MIN_TXQS;
+ if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
+ inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
+ txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
+ txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
+ }
+ if (txq_inline && priv->txqs_n >= txqs_inline) {
unsigned int ds_cnt;
- tmpl->txq.max_inline =
- ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
+ txq_ctrl->txq.max_inline =
+ ((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
- tmpl->txq.inline_en = 1;
- /* TSO and MPS can't be enabled concurrently. */
- assert(!priv->tso || !priv->mps);
- if (priv->mps == MLX5_MPW_ENHANCED) {
- tmpl->txq.inline_max_packet_sz =
- priv->inline_max_packet_sz;
+ if (is_empw_func) {
/* To minimize the size of data set, avoid requesting
* too large WQ.
*/
- tmpl->max_inline_data =
- ((RTE_MIN(priv->txq_inline,
- priv->inline_max_packet_sz) +
+ txq_ctrl->max_inline_data =
+ ((RTE_MIN(txq_inline,
+ inline_max_packet_sz) +
(RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (priv->tso) {
- int inline_diff = tmpl->txq.max_inline - max_tso_inline;
+ } else if (tso) {
+ int inline_diff = txq_ctrl->txq.max_inline -
+ max_tso_inline;
/*
* Adjust inline value as Verbs aggregates
* tso_inline and txq_inline fields.
*/
- tmpl->max_inline_data = inline_diff > 0 ?
+ txq_ctrl->max_inline_data = inline_diff > 0 ?
inline_diff *
RTE_CACHE_LINE_SIZE :
0;
} else {
- tmpl->max_inline_data =
- tmpl->txq.max_inline * RTE_CACHE_LINE_SIZE;
+ txq_ctrl->max_inline_data =
+ txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
}
/*
* Check if the inline size is too large in a way which
@@ -632,7 +709,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
* WQE ETH (1 DS)
* Inline part (N DS)
*/
- ds_cnt = 2 + (tmpl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
+ ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
if (ds_cnt > MLX5_DSEG_MAX) {
unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
MLX5_WQE_DWORD_SIZE;
@@ -641,18 +718,61 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
RTE_CACHE_LINE_SIZE);
WARN("txq inline is too large (%d) setting it to "
"the maximum possible: %d\n",
- priv->txq_inline, max_inline);
- tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
+ txq_inline, max_inline);
+ txq_ctrl->txq.max_inline = max_inline /
+ RTE_CACHE_LINE_SIZE;
}
}
- if (priv->tso) {
- tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
- tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
- max_tso_inline);
- tmpl->txq.tso_en = 1;
+ if (tso) {
+ txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
+ txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
+ max_tso_inline);
+ txq_ctrl->txq.tso_en = 1;
}
- if (priv->tunnel_en)
- tmpl->txq.tunnel_en = 1;
+ txq_ctrl->txq.tunnel_en = config->tunnel_en;
+}
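
txq_set_params() resolves each inline option with the same rule: an explicit user value always wins, and MLX5_ARG_UNSET falls back to a default that depends on whether an eMPW burst function was selected. A compact sketch of that rule (sentinel and defaults illustrative):

    #define ARG_UNSET (-1)     /* stand-in for MLX5_ARG_UNSET */

    static unsigned int
    resolve_inline_opt(int configured, unsigned int empw_default,
                       int is_empw_func)
    {
        if (configured != ARG_UNSET)            /* explicit value wins */
            return (unsigned int)configured;
        return is_empw_func ? empw_default : 0; /* path-specific default */
    }
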
+
+/**
+ * Create a DPDK Tx queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * TX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ *
+ * @return
+ * A DPDK queue object on success.
+ */
+struct mlx5_txq_ctrl*
+mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
+ unsigned int socket,
+ const struct rte_eth_txconf *conf)
+{
+ struct mlx5_txq_ctrl *tmpl;
+
+ tmpl = rte_calloc_socket("TXQ", 1,
+ sizeof(*tmpl) +
+ desc * sizeof(struct rte_mbuf *),
+ 0, socket);
+ if (!tmpl)
+ return NULL;
+ assert(desc > MLX5_TX_COMP_THRESH);
+ tmpl->txq.offloads = conf->offloads;
+ tmpl->priv = priv;
+ tmpl->socket = socket;
+ tmpl->txq.elts_n = log2above(desc);
+ txq_set_params(tmpl);
+ /* MRs will be registered in mp2mr[] later. */
+ DEBUG("priv->device_attr.max_qp_wr is %d",
+ priv->device_attr.orig_attr.max_qp_wr);
+ DEBUG("priv->device_attr.max_sge is %d",
+ priv->device_attr.orig_attr.max_sge);
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
@@ -717,6 +837,7 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
{
unsigned int i;
struct mlx5_txq_ctrl *txq;
+ size_t page_size = sysconf(_SC_PAGESIZE);
if (!(*priv->txqs)[idx])
return 0;
@@ -736,6 +857,9 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
txq->txq.mp2mr[i] = NULL;
}
}
+ if (priv->uar_base)
+ munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
+ page_size), page_size);
if (rte_atomic32_dec_and_test(&txq->refcnt)) {
txq_free_elts(txq);
LIST_REMOVE(txq, next);
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 2fbd10b1..e1bfb9cd 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#ifndef RTE_PMD_MLX5_UTILS_H_
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 6fc315ef..75c34562 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#include <stddef.h>
@@ -36,12 +8,23 @@
#include <assert.h>
#include <stdint.h>
-#include <rte_ethdev.h>
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
+#include "mlx5_glue.h"
/**
* DPDK callback to configure a VLAN filter.
@@ -127,13 +110,18 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
DEBUG("set VLAN offloads 0x%x for port %d queue %d",
vlan_offloads, rxq->port_id, idx);
+ if (!rxq_ctrl->ibv) {
+ /* Update related bits in RX queue. */
+ rxq->vlan_strip = !!on;
+ return;
+ }
mod = (struct ibv_wq_attr){
.attr_mask = IBV_WQ_ATTR_FLAGS,
.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
.flags = vlan_offloads,
};
- err = ibv_modify_wq(rxq_ctrl->ibv->wq, &mod);
+ err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
if (err) {
ERROR("%p: failed to modified stripping mode: %s",
(void *)priv, strerror(err));
@@ -160,7 +148,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
struct priv *priv = dev->data->dev_private;
/* Validate hw support */
- if (!priv->hw_vlan_strip) {
+ if (!priv->config.hw_vlan_strip) {
ERROR("VLAN stripping is not supported");
return;
}
@@ -191,9 +179,10 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
unsigned int i;
if (mask & ETH_VLAN_STRIP_MASK) {
- int hw_vlan_strip = !!dev->data->dev_conf.rxmode.hw_vlan_strip;
+ int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP);
- if (!priv->hw_vlan_strip) {
+ if (!priv->config.hw_vlan_strip) {
ERROR("VLAN stripping is not supported");
return 0;
}
diff --git a/drivers/net/mrvl/Makefile b/drivers/net/mrvl/Makefile
index 815c3bae..f75e53c6 100644
--- a/drivers/net/mrvl/Makefile
+++ b/drivers/net/mrvl/Makefile
@@ -51,8 +51,8 @@ EXPORT_MAP := rte_pmd_mrvl_version.map
# external library dependencies
CFLAGS += -I$(LIBMUSDK_PATH)/include
-CFLAGS += -DMVCONF_ARCH_DMA_ADDR_T_64BIT
-CFLAGS += -DCONF_PP2_BPOOL_COOKIE_SIZE=32
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -O3
LDLIBS += -L$(LIBMUSDK_PATH)/lib
diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mrvl/mrvl_ethdev.c
index 29361652..705c4bd8 100644
--- a/drivers/net/mrvl/mrvl_ethdev.c
+++ b/drivers/net/mrvl/mrvl_ethdev.c
@@ -32,7 +32,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
@@ -47,10 +47,6 @@
#undef container_of
#endif
-#include <drivers/mv_pp2.h>
-#include <drivers/mv_pp2_bpool.h>
-#include <drivers/mv_pp2_hif.h>
-
#include <fcntl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
@@ -98,6 +94,17 @@
/* Memory size (in bytes) for MUSDK dma buffers */
#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+/** Port Rx offload capabilities */
+#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CRC_STRIP | \
+ DEV_RX_OFFLOAD_CHECKSUM)
+
+/** Port Tx offload capabilities */
+#define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM)
+
static const char * const valid_args[] = {
MRVL_IFACE_NAME_ARG,
MRVL_CFG_ARG,
@@ -115,6 +122,11 @@ struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
+struct mrvl_ifnames {
+ const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
+ int idx;
+};
+
/*
* To use buffer harvesting based on loopback port, a shadow queue structure
* was introduced for buffer information bookkeeping.
@@ -149,21 +161,17 @@ struct mrvl_txq {
int queue_id;
int port_id;
uint64_t bytes_sent;
+ struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
};
-/*
- * Every tx queue should have dedicated shadow tx queue.
- *
- * Ports assigned by DPDK might not start at zero or be continuous so
- * as a workaround define shadow queues for each possible port so that
- * we eventually fit somewhere.
- */
-struct mrvl_shadow_txq shadow_txqs[RTE_MAX_ETHPORTS][RTE_MAX_LCORE];
-
-/** Number of ports configured. */
-int mrvl_ports_nb;
static int mrvl_lcore_first;
static int mrvl_lcore_last;
+static int mrvl_dev_num;
+
+static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
+static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
+ struct pp2_hif *hif, unsigned int core_id,
+ struct mrvl_shadow_txq *sq, int qid, int force);
static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
@@ -190,6 +198,59 @@ mrvl_reserve_bit(int *bitmap, int max)
return n;
}
+static int
+mrvl_init_hif(int core_id)
+{
+ struct pp2_hif_params params;
+ char match[MRVL_MATCH_LEN];
+ int ret;
+
+ ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ return ret;
+ }
+
+ snprintf(match, sizeof(match), "hif-%d", ret);
+ memset(&params, 0, sizeof(params));
+ params.match = match;
+ params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
+ ret = pp2_hif_init(&params, &hifs[core_id]);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline struct pp2_hif*
+mrvl_get_hif(struct mrvl_priv *priv, int core_id)
+{
+ int ret;
+
+ if (likely(hifs[core_id] != NULL))
+ return hifs[core_id];
+
+ rte_spinlock_lock(&priv->lock);
+
+ ret = mrvl_init_hif(core_id);
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ goto out;
+ }
+
+ if (core_id < mrvl_lcore_first)
+ mrvl_lcore_first = core_id;
+
+ if (core_id > mrvl_lcore_last)
+ mrvl_lcore_last = core_id;
+out:
+ rte_spinlock_unlock(&priv->lock);
+
+ return hifs[core_id];
+}
+
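+/*
+ * Illustrative sketch (editor's note, values assumed): hifs[] is filled
+ * lazily, once per lcore, under priv->lock. A first call on lcore 2:
+ *
+ * struct pp2_hif *hif = mrvl_get_hif(priv, 2);
+ * if (!hif)
+ * return -ENODEV; // init failed, hifs[2] stayed NULL
+ *
+ * Later calls on the same lcore take the likely() fast path and return
+ * the cached handle without locking.
+ */
+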
/**
* Configure rss based on dpdk rss configuration.
*
@@ -252,13 +313,13 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
RTE_LOG(INFO, PMD,
"L2 CRC stripping is always enabled in hw\n");
- dev->data->dev_conf.rxmode.hw_strip_crc = 1;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
- if (dev->data->dev_conf.rxmode.hw_vlan_strip) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
return -EINVAL;
}
@@ -268,17 +329,17 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.enable_lro) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
RTE_LOG(INFO, PMD, "LRO not supported\n");
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.jumbo_frame)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
ETHER_HDR_LEN - ETHER_CRC_LEN;
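/*
 * Worked example (editor's sketch, values assumed): with jumbo frames
 * enabled and max_rx_pkt_len = 9018, the derived MTU is
 * 9018 - ETHER_HDR_LEN (14) - ETHER_CRC_LEN (4) = 9000.
 */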
@@ -328,6 +389,9 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
return -EINVAL;
+ if (!priv->ppio)
+ return 0;
+
ret = pp2_ppio_set_mru(priv->ppio, mru);
if (ret)
return ret;
@@ -350,6 +414,9 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
+ if (!priv->ppio)
+ return -EPERM;
+
ret = pp2_ppio_enable(priv->ppio);
if (ret)
return ret;
@@ -365,8 +432,6 @@ mrvl_dev_set_link_up(struct rte_eth_dev *dev)
if (ret)
pp2_ppio_disable(priv->ppio);
- dev->data->dev_link.link_status = ETH_LINK_UP;
-
return ret;
}
@@ -383,15 +448,11 @@ static int
mrvl_dev_set_link_down(struct rte_eth_dev *dev)
{
struct mrvl_priv *priv = dev->data->dev_private;
- int ret;
- ret = pp2_ppio_disable(priv->ppio);
- if (ret)
- return ret;
+ if (!priv->ppio)
+ return -EPERM;
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
-
- return ret;
+ return pp2_ppio_disable(priv->ppio);
}
/**
@@ -408,20 +469,13 @@ mrvl_dev_start(struct rte_eth_dev *dev)
{
struct mrvl_priv *priv = dev->data->dev_private;
char match[MRVL_MATCH_LEN];
- int ret;
+ int ret = 0, def_init_size;
snprintf(match, sizeof(match), "ppio-%d:%d",
priv->pp_id, priv->ppio_id);
priv->ppio_params.match = match;
/*
- * Calculate the maximum bpool size for refill feature to 1.5 of the
- * configured size. In case the bpool size will exceed this value,
- * superfluous buffers will be removed
- */
- priv->bpool_max_size = priv->bpool_init_size +
- (priv->bpool_init_size >> 1);
- /*
* Calculate the minimum bpool size for refill feature as follows:
* 2 default burst sizes multiplied by the number of rx queues.
* If the bpool size drops below this value, new buffers will
@@ -429,9 +483,34 @@ mrvl_dev_start(struct rte_eth_dev *dev)
*/
priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
+ /* In case the initial bpool size configured during queue setup is
+ * smaller than the minimum size, add more buffers
+ */
+ def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
+ if (priv->bpool_init_size < def_init_size) {
+ int buffs_to_add = def_init_size - priv->bpool_init_size;
+
+ priv->bpool_init_size += buffs_to_add;
+ ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n");
+ }
+
+ /*
+ * Calculate the maximum bpool size for refill feature as follows:
+ * the maximum number of descriptors in an rx queue multiplied by the
+ * number of rx queues, plus the minimum bpool size.
+ * If the bpool size exceeds this value, superfluous buffers will
+ * be removed
+ */
+ priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
+ priv->bpool_min_size;
+
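+ /*
+ * Numeric sketch (editor's note, constants assumed): with 4 rx queues,
+ * MRVL_BURST_SIZE = 64 and MRVL_PP2_RXD_MAX = 2048:
+ * bpool_min_size = 4 * 64 * 2 = 512
+ * bpool_max_size = 4 * 2048 + 512 = 8704
+ * so the refill logic keeps the pool between those two watermarks.
+ */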
ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
- if (ret)
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to init ppio\n");
return ret;
+ }
/*
* In case there are some stale uc/mc mac addresses, flush them
@@ -466,17 +545,20 @@ mrvl_dev_start(struct rte_eth_dev *dev)
if (mrvl_qos_cfg) {
ret = mrvl_start_qos_mapping(priv);
if (ret) {
- pp2_ppio_deinit(priv->ppio);
- return ret;
+ RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n");
+ goto out;
}
}
ret = mrvl_dev_set_link_up(dev);
- if (ret)
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failed to set link up\n");
goto out;
+ }
return 0;
out:
+ RTE_LOG(ERR, PMD, "Failed to start device\n");
pp2_ppio_deinit(priv->ppio);
return ret;
}
@@ -518,21 +600,32 @@ mrvl_flush_rx_queues(struct rte_eth_dev *dev)
static void
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
{
- int i;
+ int i, j;
+ struct mrvl_txq *txq;
RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- struct mrvl_shadow_txq *sq =
- &shadow_txqs[dev->data->port_id][i];
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = (struct mrvl_txq *)dev->data->tx_queues[i];
- while (sq->tail != sq->head) {
- uint64_t addr = cookie_addr_high |
+ for (j = 0; j < RTE_MAX_LCORE; j++) {
+ struct mrvl_shadow_txq *sq;
+
+ if (!hifs[j])
+ continue;
+
+ sq = &txq->shadow_txqs[j];
+ mrvl_free_sent_buffers(txq->priv->ppio,
+ hifs[j], j, sq, txq->queue_id, 1);
+ while (sq->tail != sq->head) {
+ uint64_t addr = cookie_addr_high |
sq->ent[sq->tail].buff.cookie;
- rte_pktmbuf_free((struct rte_mbuf *)addr);
- sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
+ rte_pktmbuf_free(
+ (struct rte_mbuf *)addr);
+ sq->tail = (sq->tail + 1) &
+ MRVL_PP2_TX_SHADOWQ_MASK;
+ }
+ memset(sq, 0, sizeof(*sq));
}
-
- memset(sq, 0, sizeof(*sq));
}
}
@@ -546,8 +639,15 @@ static void
mrvl_flush_bpool(struct rte_eth_dev *dev)
{
struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_hif *hif;
uint32_t num;
int ret;
+ unsigned int core_id = rte_lcore_id();
+
+ if (core_id == LCORE_ID_ANY)
+ core_id = 0;
+
+ hif = mrvl_get_hif(priv, core_id);
ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
if (ret) {
@@ -559,8 +659,7 @@ mrvl_flush_bpool(struct rte_eth_dev *dev)
struct pp2_buff_inf inf;
uint64_t addr;
- ret = pp2_bpool_get_buff(hifs[rte_lcore_id()], priv->bpool,
- &inf);
+ ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
if (ret)
break;
@@ -583,8 +682,10 @@ mrvl_dev_stop(struct rte_eth_dev *dev)
mrvl_dev_set_link_down(dev);
mrvl_flush_rx_queues(dev);
mrvl_flush_tx_shadow_queues(dev);
- if (priv->qos_tbl)
+ if (priv->qos_tbl) {
pp2_cls_qos_tbl_deinit(priv->qos_tbl);
+ priv->qos_tbl = NULL;
+ }
pp2_ppio_deinit(priv->ppio);
priv->ppio = NULL;
}
@@ -632,9 +733,13 @@ mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
* TODO
* once MUSDK provides necessary API use it here
*/
+ struct mrvl_priv *priv = dev->data->dev_private;
struct ethtool_cmd edata;
struct ifreq req;
- int ret, fd;
+ int ret, fd, link_up;
+
+ if (!priv->ppio)
+ return -EPERM;
edata.cmd = ETHTOOL_GSET;
@@ -674,6 +779,8 @@ mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
ETH_LINK_HALF_DUPLEX;
dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
ETH_LINK_FIXED;
+ pp2_ppio_get_link_state(priv->ppio, &link_up);
+ dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
return 0;
}
@@ -690,7 +797,10 @@ mrvl_promiscuous_enable(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
- ret = pp2_ppio_set_uc_promisc(priv->ppio, 1);
+ if (!priv->ppio)
+ return;
+
+ ret = pp2_ppio_set_promisc(priv->ppio, 1);
if (ret)
RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
}
@@ -707,6 +817,9 @@ mrvl_allmulticast_enable(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
+ if (!priv->ppio)
+ return;
+
ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
if (ret)
RTE_LOG(ERR, PMD, "Failed enable all-multicast mode\n");
@@ -724,7 +837,10 @@ mrvl_promiscuous_disable(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
- ret = pp2_ppio_set_uc_promisc(priv->ppio, 0);
+ if (!priv->ppio)
+ return;
+
+ ret = pp2_ppio_set_promisc(priv->ppio, 0);
if (ret)
RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
}
@@ -741,6 +857,9 @@ mrvl_allmulticast_disable(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
+ if (!priv->ppio)
+ return;
+
ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
if (ret)
RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
@@ -761,6 +880,9 @@ mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
char buf[ETHER_ADDR_FMT_SIZE];
int ret;
+ if (!priv->ppio)
+ return;
+
ret = pp2_ppio_remove_mac_addr(priv->ppio,
dev->data->mac_addrs[index].addr_bytes);
if (ret) {
@@ -797,6 +919,9 @@ mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
/* For setting index 0, mrvl_mac_addr_set() should be used.*/
return -1;
+ if (!priv->ppio)
+ return 0;
+
/*
* Maximum number of uc addresses can be tuned via kernel module mvpp2x
* parameter uc_filter_max. Maximum number of mc addresses is then
@@ -832,15 +957,17 @@ static void
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
- pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
- /*
- * TODO
- * Port stops sending packets if pp2_ppio_set_mac_addr()
- * was called after pp2_ppio_enable(). As a quick fix issue
- * enable port once again.
- */
- pp2_ppio_enable(priv->ppio);
+ if (!priv->ppio)
+ return;
+
+ ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
+ if (ret) {
+ char buf[ETHER_ADDR_FMT_SIZE];
+ ether_format_addr(buf, sizeof(buf), mac_addr);
+ RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf);
+ }
}
/**
@@ -862,6 +989,9 @@ mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
uint64_t drop_mac = 0;
unsigned int i, idx, ret;
+ if (!priv->ppio)
+ return -EPERM;
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct mrvl_rxq *rxq = dev->data->rx_queues[i];
struct pp2_ppio_inq_statistics rx_stats;
@@ -954,6 +1084,9 @@ mrvl_stats_reset(struct rte_eth_dev *dev)
struct mrvl_priv *priv = dev->data->dev_private;
int i;
+ if (!priv->ppio)
+ return;
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct mrvl_rxq *rxq = dev->data->rx_queues[i];
@@ -1002,15 +1135,11 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
- info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ info->rx_offload_capa = MRVL_RX_OFFLOADS;
+ info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;
- info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ info->tx_offload_capa = MRVL_TX_OFFLOADS;
+ info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
info->flow_type_rss_offloads = ETH_RSS_IPV4 |
ETH_RSS_NONFRAG_IPV4_TCP |
@@ -1018,6 +1147,7 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
/* By default packets are dropped if no descriptors are available */
info->default_rxconf.rx_drop_en = 1;
+ info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP;
info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
}
@@ -1110,6 +1240,9 @@ mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct mrvl_priv *priv = dev->data->dev_private;
+ if (!priv->ppio)
+ return -EPERM;
+
return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
pp2_ppio_remove_vlan(priv->ppio, vlan_id);
}
@@ -1131,9 +1264,19 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
int i, ret;
- unsigned int core_id = rte_lcore_id();
- struct pp2_hif *hif = hifs[core_id];
- struct pp2_bpool *bpool = rxq->priv->bpool;
+ unsigned int core_id;
+ struct pp2_hif *hif;
+ struct pp2_bpool *bpool;
+
+ core_id = rte_lcore_id();
+ if (core_id == LCORE_ID_ANY)
+ core_id = 0;
+
+ hif = mrvl_get_hif(rxq->priv, core_id);
+ if (!hif)
+ return -1;
+
+ bpool = rxq->priv->bpool;
ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
if (ret)
@@ -1173,6 +1316,42 @@ out:
}
/**
+ * Check whether requested rx queue offloads match port offloads.
+ *
+ * @param dev
+ * Pointer to the device.
+ * @param requested
+ * Bitmap of the requested offloads.
+ *
+ * @return
+ * 1 if requested offloads are okay, 0 otherwise.
+ */
+static int
+mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported = MRVL_RX_OFFLOADS;
+ uint64_t unsupported = requested & ~supported;
+ uint64_t missing = mandatory & ~requested;
+
+ if (unsupported) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
+ "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
+ requested, supported);
+ return 0;
+ }
+
+ if (missing) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
+ "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
+ requested, missing);
+ return 0;
+ }
+
+ return 1;
+}
+
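+/*
+ * Example (editor's sketch): if the port was configured with
+ * DEV_RX_OFFLOAD_CHECKSUM and a queue then requests only
+ * DEV_RX_OFFLOAD_TCP_LRO, both checks trip:
+ * unsupported = TCP_LRO & ~MRVL_RX_OFFLOADS -> non-zero
+ * missing = CHECKSUM & ~TCP_LRO -> non-zero
+ * and mrvl_rx_queue_setup() fails with -ENOTSUP.
+ */
+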
+/**
* DPDK callback to configure the receive queue.
*
* @param dev
@@ -1184,7 +1363,7 @@ out:
* @param socket
* NUMA socket on which memory must be allocated.
* @param conf
- * Thresholds parameters (unused_).
+ * Threshold parameters.
* @param mp
* Memory pool for buffer allocations.
*
@@ -1194,7 +1373,7 @@ out:
static int
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket,
- const struct rte_eth_rxconf *conf __rte_unused,
+ const struct rte_eth_rxconf *conf,
struct rte_mempool *mp)
{
struct mrvl_priv *priv = dev->data->dev_private;
@@ -1203,6 +1382,9 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
int ret, tc, inq;
+ if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
+ return -ENOTSUP;
+
if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
/*
* Unknown TC mapping, mapping will not have a correct queue.
@@ -1234,7 +1416,8 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rxq->priv = priv;
rxq->mp = mp;
- rxq->cksum_enabled = dev->data->dev_conf.rxmode.hw_ip_checksum;
+ rxq->cksum_enabled =
+ dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
rxq->queue_id = idx;
rxq->port_id = dev->data->port_id;
mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1269,8 +1452,15 @@ mrvl_rx_queue_release(void *rxq)
struct mrvl_rxq *q = rxq;
struct pp2_ppio_tc_params *tc_params;
int i, num, tc, inq;
+ struct pp2_hif *hif;
+ unsigned int core_id = rte_lcore_id();
- if (!q)
+ if (!q)
+ return;
+
+ if (core_id == LCORE_ID_ANY)
+ core_id = 0;
+
+ hif = mrvl_get_hif(q->priv, core_id);
+ if (!hif)
return;
tc = q->priv->rxq_map[q->queue_id].tc;
@@ -1281,7 +1471,7 @@ mrvl_rx_queue_release(void *rxq)
struct pp2_buff_inf inf;
uint64_t addr;
- pp2_bpool_get_buff(hifs[rte_lcore_id()], q->priv->bpool, &inf);
+ pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
addr = cookie_addr_high | inf.cookie;
rte_pktmbuf_free((struct rte_mbuf *)addr);
}
@@ -1290,6 +1480,42 @@ mrvl_rx_queue_release(void *rxq)
}
/**
+ * Check whether requested tx queue offloads match port offloads.
+ *
+ * @param dev
+ * Pointer to the device.
+ * @param requested
+ * Bitmap of the requested offloads.
+ *
+ * @return
+ * 1 if requested offloads are okay, 0 otherwise.
+ */
+static int
+mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+ uint64_t supported = MRVL_TX_OFFLOADS;
+ uint64_t unsupported = requested & ~supported;
+ uint64_t missing = mandatory & ~requested;
+
+ if (unsupported) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
+ "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
+ requested, supported);
+ return 0;
+ }
+
+ if (missing) {
+ RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
+ "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
+ requested, missing);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
* DPDK callback to configure the transmit queue.
*
* @param dev
@@ -1301,7 +1527,7 @@ mrvl_rx_queue_release(void *rxq)
* @param socket
* NUMA socket on which memory must be allocated.
* @param conf
- * Thresholds parameters (unused).
+ * Threshold parameters.
*
* @return
* 0 on success, negative error value otherwise.
@@ -1309,11 +1535,14 @@ mrvl_rx_queue_release(void *rxq)
static int
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket,
- const struct rte_eth_txconf *conf __rte_unused)
+ const struct rte_eth_txconf *conf)
{
struct mrvl_priv *priv = dev->data->dev_private;
struct mrvl_txq *txq;
+ if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
+ return -ENOTSUP;
+
if (dev->data->tx_queues[idx]) {
rte_free(dev->data->tx_queues[idx]);
dev->data->tx_queues[idx] = NULL;
@@ -1557,9 +1786,12 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
struct pp2_bpool *bpool;
int i, ret, rx_done = 0;
int num;
+ struct pp2_hif *hif;
unsigned int core_id = rte_lcore_id();
- if (unlikely(!q->priv->ppio))
+ hif = mrvl_get_hif(q->priv, core_id);
+
+ if (unlikely(!q->priv->ppio || !hif))
return 0;
bpool = q->priv->bpool;
@@ -1602,7 +1834,7 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
.cookie = (pp2_cookie_t)(uint64_t)mbuf,
};
- pp2_bpool_put_buff(hifs[core_id], bpool, &binf);
+ pp2_bpool_put_buff(hif, bpool, &binf);
mrvl_port_bpool_size
[bpool->pp2_id][bpool->id][core_id]++;
q->drop_mac++;
@@ -1648,14 +1880,15 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
q->priv->bpool_init_size);
for (i = 0; i < pkt_to_remove; i++) {
- pp2_bpool_get_buff(hifs[core_id], bpool, &buff);
+ ret = pp2_bpool_get_buff(hif, bpool, &buff);
+ if (ret)
+ break;
mbuf = (struct rte_mbuf *)
(cookie_addr_high | buff.cookie);
rte_pktmbuf_free(mbuf);
}
mrvl_port_bpool_size
- [bpool->pp2_id][bpool->id][core_id] -=
- pkt_to_remove;
+ [bpool->pp2_id][bpool->id][core_id] -= i;
}
rte_spinlock_unlock(&q->priv->lock);
}
@@ -1740,11 +1973,12 @@ mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
*/
static inline void
mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
- struct mrvl_shadow_txq *sq, int qid, int force)
+ unsigned int core_id, struct mrvl_shadow_txq *sq,
+ int qid, int force)
{
struct buff_release_entry *entry;
uint16_t nb_done = 0, num = 0, skip_bufs = 0;
- int i, core_id = rte_lcore_id();
+ int i;
pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
@@ -1791,6 +2025,7 @@ skip:
sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
sq->size -= num;
num = 0;
+ skip_bufs = 0;
}
if (likely(num)) {
@@ -1817,18 +2052,23 @@ static uint16_t
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct mrvl_txq *q = txq;
- struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
- struct pp2_hif *hif = hifs[rte_lcore_id()];
+ struct mrvl_shadow_txq *sq;
+ struct pp2_hif *hif;
struct pp2_ppio_desc descs[nb_pkts];
+ unsigned int core_id = rte_lcore_id();
int i, ret, bytes_sent = 0;
uint16_t num, sq_free_size;
uint64_t addr;
- if (unlikely(!q->priv->ppio))
+ hif = mrvl_get_hif(q->priv, core_id);
+ sq = &q->shadow_txqs[core_id];
+
+ if (unlikely(!q->priv->ppio || !hif))
return 0;
if (sq->size)
- mrvl_free_sent_buffers(q->priv->ppio, hif, sq, q->queue_id, 0);
+ mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
+ sq, q->queue_id, 0);
sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
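/*
 * Editor's note (sketch): the shadow queue is a power-of-two ring where
 * head == tail means empty, so only SIZE - 1 slots are usable; hence the
 * "- 1" above. E.g. with a 512-entry ring and sq->size = 12, up to
 * 512 - 12 - 1 = 499 packets may be enqueued.
 */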
if (unlikely(nb_pkts > sq_free_size)) {
@@ -1856,8 +2096,9 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
sq->ent[sq->head].buff.addr =
rte_mbuf_data_iova_default(mbuf);
sq->ent[sq->head].bpool =
- (unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
- NULL : mrvl_port_to_bpool_lookup[mbuf->port];
+ (unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
+ mbuf->refcnt > 1)) ? NULL :
+ mrvl_port_to_bpool_lookup[mbuf->port];
sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
sq->size++;
@@ -2034,6 +2275,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
eth_dev->data->dev_private = priv;
eth_dev->device = &vdev->device;
eth_dev->dev_ops = &mrvl_ops;
@@ -2067,6 +2309,7 @@ mrvl_eth_dev_destroy(const char *name)
priv = eth_dev->data->dev_private;
pp2_bpool_deinit(priv->bpool);
+ used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
rte_free(priv);
rte_free(eth_dev->data->mac_addrs);
rte_eth_dev_release_port(eth_dev);
@@ -2090,41 +2333,9 @@ static int
mrvl_get_ifnames(const char *key __rte_unused, const char *value,
void *extra_args)
{
- const char **ifnames = extra_args;
-
- ifnames[mrvl_ports_nb++] = value;
-
- return 0;
-}
-
-/**
- * Initialize per-lcore MUSDK hardware interfaces (hifs).
- *
- * @return
- * 0 on success, negative error value otherwise.
- */
-static int
-mrvl_init_hifs(void)
-{
- struct pp2_hif_params params;
- char match[MRVL_MATCH_LEN];
- int i, ret;
+ struct mrvl_ifnames *ifnames = extra_args;
- RTE_LCORE_FOREACH(i) {
- ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
- if (ret < 0)
- return ret;
-
- snprintf(match, sizeof(match), "hif-%d", ret);
- memset(&params, 0, sizeof(params));
- params.match = match;
- params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
- ret = pp2_hif_init(&params, &hifs[i]);
- if (ret) {
- RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", i);
- return ret;
- }
- }
+ ifnames->names[ifnames->idx++] = value;
return 0;
}
@@ -2137,19 +2348,12 @@ mrvl_deinit_hifs(void)
{
int i;
- RTE_LCORE_FOREACH(i) {
+ for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
if (hifs[i])
pp2_hif_deinit(hifs[i]);
}
-}
-
-static void mrvl_set_first_last_cores(int core_id)
-{
- if (core_id < mrvl_lcore_first)
- mrvl_lcore_first = core_id;
-
- if (core_id > mrvl_lcore_last)
- mrvl_lcore_last = core_id;
+ used_hifs = MRVL_MUSDK_HIFS_RESERVED;
+ memset(hifs, 0, sizeof(hifs));
}
/**
@@ -2165,9 +2369,9 @@ static int
rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
{
struct rte_kvargs *kvlist;
- const char *ifnames[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
+ struct mrvl_ifnames ifnames;
int ret = -EINVAL;
- uint32_t i, ifnum, cfgnum, core_id;
+ uint32_t i, ifnum, cfgnum;
const char *params;
params = rte_vdev_device_args(vdev);
@@ -2179,21 +2383,34 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
return -EINVAL;
ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
- if (ifnum > RTE_DIM(ifnames))
+ if (ifnum > RTE_DIM(ifnames.names))
goto out_free_kvlist;
+ ifnames.idx = 0;
rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
mrvl_get_ifnames, &ifnames);
- cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
- if (cfgnum > 1) {
- RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
- goto out_free_kvlist;
- } else if (cfgnum == 1) {
- rte_kvargs_process(kvlist, MRVL_CFG_ARG,
- mrvl_get_qoscfg, &mrvl_qos_cfg);
+
+ /*
+ * The below system initialization should be done only once,
+ * on the first provided configuration file
+ */
+ if (!mrvl_qos_cfg) {
+ cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
+ RTE_LOG(INFO, PMD, "Parsing config file!\n");
+ if (cfgnum > 1) {
+ RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
+ goto out_free_kvlist;
+ } else if (cfgnum == 1) {
+ rte_kvargs_process(kvlist, MRVL_CFG_ARG,
+ mrvl_get_qoscfg, &mrvl_qos_cfg);
+ }
}
+ if (mrvl_dev_num)
+ goto init_devices;
+
+ RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n");
/*
* ret == -EEXIST is correct, it means DMA
* has been already initialized (by another PMD).
@@ -2213,37 +2430,33 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
goto out_deinit_dma;
}
- ret = mrvl_init_hifs();
- if (ret)
- goto out_deinit_hifs;
+ memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
+ memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
+ mrvl_lcore_first = RTE_MAX_LCORE;
+ mrvl_lcore_last = 0;
+
+init_devices:
for (i = 0; i < ifnum; i++) {
- RTE_LOG(INFO, PMD, "Creating %s\n", ifnames[i]);
- ret = mrvl_eth_dev_create(vdev, ifnames[i]);
+ RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]);
+ ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
if (ret)
goto out_cleanup;
}
+ mrvl_dev_num += ifnum;
rte_kvargs_free(kvlist);
- memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
-
- mrvl_lcore_first = RTE_MAX_LCORE;
- mrvl_lcore_last = 0;
-
- RTE_LCORE_FOREACH(core_id) {
- mrvl_set_first_last_cores(core_id);
- }
-
return 0;
out_cleanup:
for (; i > 0; i--)
- mrvl_eth_dev_destroy(ifnames[i]);
-out_deinit_hifs:
- mrvl_deinit_hifs();
- mrvl_deinit_pp2();
+ mrvl_eth_dev_destroy(ifnames.names[i - 1]);
+
+ if (mrvl_dev_num == 0)
+ mrvl_deinit_pp2();
out_deinit_dma:
- mv_sys_dma_mem_destroy();
+ if (mrvl_dev_num == 0)
+ mv_sys_dma_mem_destroy();
out_free_kvlist:
rte_kvargs_free(kvlist);
@@ -2276,11 +2489,15 @@ rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
rte_eth_dev_get_name_by_port(i, ifname);
mrvl_eth_dev_destroy(ifname);
+ mrvl_dev_num--;
}
- mrvl_deinit_hifs();
- mrvl_deinit_pp2();
- mv_sys_dma_mem_destroy();
+ if (mrvl_dev_num == 0) {
+ RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n");
+ mrvl_deinit_hifs();
+ mrvl_deinit_pp2();
+ mv_sys_dma_mem_destroy();
+ }
return 0;
}
diff --git a/drivers/net/mrvl/mrvl_ethdev.h b/drivers/net/mrvl/mrvl_ethdev.h
index 2a4ab5ab..f7afae5c 100644
--- a/drivers/net/mrvl/mrvl_ethdev.h
+++ b/drivers/net/mrvl/mrvl_ethdev.h
@@ -36,7 +36,12 @@
#define _MRVL_ETHDEV_H_
#include <rte_spinlock.h>
+
+#include <env/mv_autogen_comp_flags.h>
+#include <drivers/mv_pp2.h>
+#include <drivers/mv_pp2_bpool.h>
#include <drivers/mv_pp2_cls.h>
+#include <drivers/mv_pp2_hif.h>
#include <drivers/mv_pp2_ppio.h>
/** Maximum number of rx queues per port */
@@ -110,7 +115,4 @@ struct mrvl_priv {
uint16_t nb_rx_queues;
};
-/** Number of ports configured. */
-extern int mrvl_ports_nb;
-
#endif /* _MRVL_ETHDEV_H_ */
diff --git a/drivers/net/mrvl/mrvl_qos.c b/drivers/net/mrvl/mrvl_qos.c
index 7c9943aa..fbb36813 100644
--- a/drivers/net/mrvl/mrvl_qos.c
+++ b/drivers/net/mrvl/mrvl_qos.c
@@ -369,7 +369,7 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
}
/* Use the number of ports given as vdev parameters. */
- for (n = 0; n < mrvl_ports_nb; ++n) {
+ for (n = 0; n < (PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC); ++n) {
snprintf(sec_name, sizeof(sec_name), "%s %d %s",
MRVL_TOK_PORT, n, MRVL_TOK_DEFAULT);
diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile
index 4ba066ac..aa3b68a4 100644
--- a/drivers/net/nfp/Makefile
+++ b/drivers/net/nfp/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 0501156b..e5bfde62 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -43,7 +43,7 @@
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_dev.h>
#include <rte_ether.h>
@@ -95,6 +95,15 @@ static void nfp_net_stop(struct rte_eth_dev *dev);
static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
+static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+
/*
* The offset of the queue controller queues in the PCIe Target. These
* happen to be at the same offset on the NFP6000 and the NFP3200 so
@@ -489,12 +498,10 @@ nfp_net_configure(struct rte_eth_dev *dev)
}
if (rxmode->jumbo_frame)
- /* this is handled in rte_eth_dev_configure */
+ hw->mtu = rxmode->max_rx_pkt_len;
- if (rxmode->hw_strip_crc) {
- PMD_INIT_LOG(INFO, "strip CRC not supported");
- return -EINVAL;
- }
+ if (!rxmode->hw_strip_crc)
+ PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
if (rxmode->enable_scatter) {
PMD_INIT_LOG(INFO, "Scatter not supported");
@@ -737,6 +744,8 @@ nfp_net_start(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
uint32_t intr_vector;
@@ -786,6 +795,19 @@ nfp_net_start(struct rte_eth_dev *dev)
rte_intr_enable(intr_handle);
+ dev_conf = &dev->data->dev_conf;
+ rxmode = &dev_conf->rxmode;
+
+ /* Checking RX mode */
+ if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
+ if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
+ if (!nfp_net_rss_config_default(dev))
+ update |= NFP_NET_CFG_UPDATE_RSS;
+ } else {
+ PMD_INIT_LOG(INFO, "RSS not supported");
+ return -EINVAL;
+ }
+ }
/* Enable device */
new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
@@ -1196,7 +1218,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
- dev_info->max_rx_pktlen = hw->mtu;
+ dev_info->max_rx_pktlen = hw->max_mtu;
/* Next should change when PF support is implemented */
dev_info->max_mac_addrs = 1;
@@ -1450,7 +1472,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
nfp_net_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
nfp_net_dev_link_status_print(dev);
@@ -1469,6 +1491,13 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
return -EINVAL;
+ /* mtu setting is forbidden if port is started */
+ if (dev->data->dev_started) {
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ dev->data->port_id);
+ return -EBUSY;
+ }
+
/* switch to jumbo mode if needed */
if ((uint32_t)mtu > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
@@ -2345,22 +2374,17 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
return ret;
}
-/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
static int
-nfp_net_reta_update(struct rte_eth_dev *dev,
+nfp_net_rss_reta_write(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
uint32_t reta, mask;
int i, j;
int idx, shift;
- uint32_t update;
struct nfp_net_hw *hw =
NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
- return -EINVAL;
-
if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
@@ -2397,6 +2421,26 @@ nfp_net_reta_update(struct rte_eth_dev *dev,
nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
reta);
}
+ return 0;
+}
+
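+/*
+ * Layout sketch (editor's note, packing assumed from the register writes
+ * above): each 32-bit RETA word holds four 8-bit queue indices, so writing
+ * queues {0,1,2,3} to consecutive entries stores 0x03020100 at
+ * NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift.
+ */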
+/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
+static int
+nfp_net_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct nfp_net_hw *hw =
+ NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t update;
+ int ret;
+
+ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
+ return -EINVAL;
+
+ ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
+ if (ret != 0)
+ return ret;
update = NFP_NET_CFG_UPDATE_RSS;
@@ -2455,33 +2499,24 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
}
static int
-nfp_net_rss_hash_update(struct rte_eth_dev *dev,
+nfp_net_rss_hash_write(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- uint32_t update;
+ struct nfp_net_hw *hw;
+ uint64_t rss_hf;
uint32_t cfg_rss_ctrl = 0;
uint8_t key;
- uint64_t rss_hf;
int i;
- struct nfp_net_hw *hw;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- rss_hf = rss_conf->rss_hf;
-
- /* Checking if RSS is enabled */
- if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
- if (rss_hf != 0) { /* Enable RSS? */
- RTE_LOG(ERR, PMD, "RSS unsupported\n");
- return -EINVAL;
- }
- return 0; /* Nothing to do */
+ /* Writing the key byte by byte */
+ for (i = 0; i < rss_conf->rss_key_len; i++) {
+ memcpy(&key, &rss_conf->rss_key[i], 1);
+ nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
}
- if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
- RTE_LOG(ERR, PMD, "hash key too long\n");
- return -EINVAL;
- }
+ rss_hf = rss_conf->rss_hf;
if (rss_hf & ETH_RSS_IPV4)
cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
@@ -2499,15 +2534,40 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
/* configuring where to apply the RSS hash */
nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
- /* Writing the key byte a byte */
- for (i = 0; i < rss_conf->rss_key_len; i++) {
- memcpy(&key, &rss_conf->rss_key[i], 1);
- nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
- }
-
/* Writing the key size */
nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
+ return 0;
+}
+
+static int
+nfp_net_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ uint32_t update;
+ uint64_t rss_hf;
+ struct nfp_net_hw *hw;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ rss_hf = rss_conf->rss_hf;
+
+ /* Checking if RSS is enabled */
+ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
+ if (rss_hf != 0) { /* Enable RSS? */
+ RTE_LOG(ERR, PMD, "RSS unsupported\n");
+ return -EINVAL;
+ }
+ return 0; /* Nothing to do */
+ }
+
+ if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
+ RTE_LOG(ERR, PMD, "hash key too long\n");
+ return -EINVAL;
+ }
+
+ nfp_net_rss_hash_write(dev, rss_conf);
+
update = NFP_NET_CFG_UPDATE_RSS;
if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
@@ -2564,6 +2624,47 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+static int
+nfp_net_rss_config_default(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rss_conf rss_conf;
+ struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
+ uint16_t rx_queues = dev->data->nb_rx_queues;
+ uint16_t queue;
+ int i, j, ret;
+
+ RTE_LOG(INFO, PMD, "setting default RSS conf for %u queues\n",
+ rx_queues);
+
+ nfp_reta_conf[0].mask = ~0x0;
+ nfp_reta_conf[1].mask = ~0x0;
+
+ queue = 0;
+ for (i = 0; i < 0x40; i += 8) {
+ for (j = i; j < (i + 8); j++) {
+ nfp_reta_conf[0].reta[j] = queue;
+ nfp_reta_conf[1].reta[j] = queue++;
+ queue %= rx_queues;
+ }
+ }
+ ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
+ if (ret != 0)
+ return ret;
+
+ dev_conf = &dev->data->dev_conf;
+ if (!dev_conf) {
+ RTE_LOG(INFO, PMD, "wrong rss conf");
+ return -EINVAL;
+ }
+ rss_conf = dev_conf->rx_adv_conf.rss_conf;
+
+ ret = nfp_net_rss_hash_write(dev, &rss_conf);
+
+ return ret;
+}
+
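+/*
+ * Example (editor's sketch): with nb_rx_queues = 4 the loops above fill
+ * both 64-entry RETA groups with the repeating pattern 0,1,2,3,0,1,2,3...
+ * spreading flows round-robin over the four rings before the hash key
+ * from dev_conf->rx_adv_conf.rss_conf is programmed.
+ */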
+
/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
.dev_configure = nfp_net_configure,
@@ -2774,7 +2875,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
- hw->mtu = hw->max_mtu;
+ hw->mtu = ETHER_MTU;
if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
hw->rx_offset = NFP_NET_RX_OFFSET;
@@ -2984,6 +3085,9 @@ nfpu_error:
return ret;
}
+int nfp_logtype_init;
+int nfp_logtype_driver;
+
static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
@@ -3057,6 +3161,17 @@ RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
+RTE_INIT(nfp_init_log);
+static void
+nfp_init_log(void)
+{
+ nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
+ if (nfp_logtype_init >= 0)
+ rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
+ nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
+ if (nfp_logtype_driver >= 0)
+ rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
+}
/*
* Local variables:
* c-file-style: "Linux"
diff --git a/drivers/net/nfp/nfp_net_logs.h b/drivers/net/nfp/nfp_net_logs.h
index 0b966e43..3fe24e96 100644
--- a/drivers/net/nfp/nfp_net_logs.h
+++ b/drivers/net/nfp/nfp_net_logs.h
@@ -34,16 +34,10 @@
#include <rte_log.h>
-#define RTE_LIBRTE_NFP_NET_DEBUG_INIT 1
-
-#ifdef RTE_LIBRTE_NFP_NET_DEBUG_INIT
+extern int nfp_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_LOG(level, fmt, args...) do { } while (0)
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
#ifdef RTE_LIBRTE_NFP_NET_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -55,21 +49,15 @@
#ifdef RTE_LIBRTE_NFP_NET_DEBUG_TX
#define PMD_TX_LOG(level, fmt, args...) \
RTE_LOG(level, PMD, "%s() tx: " fmt, __func__, ## args)
+#define ASSERT(x) if (!(x)) rte_panic("NFP_NET: " #x "\n")
#else
#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#define ASSERT(x) do { } while (0)
#endif
-#ifdef RTE_LIBRTE_NFP_NET_DEBUG_DRIVER
+extern int nfp_logtype_driver;
#define PMD_DRV_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_NFP_NET_DEBUG_INIT
-#define ASSERT(x) if (!(x)) rte_panic("NFP_NET: x")
-#else
-#define ASSERT(x) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, nfp_logtype_driver, \
+ "%s(): " fmt, __func__, ## args)
#endif /* _NFP_NET_LOGS_H_ */
diff --git a/drivers/net/null/meson.build b/drivers/net/null/meson.build
new file mode 100644
index 00000000..68ac0d2a
--- /dev/null
+++ b/drivers/net/null/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('rte_eth_null.c')
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 032c30e9..d003b283 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -32,7 +32,7 @@
*/
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -91,7 +91,7 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_SPEED_AUTONEG,
+ .link_autoneg = ETH_LINK_AUTONEG,
};
static uint16_t
@@ -278,6 +278,11 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
return 0;
}
+static int
+eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
+{
+ return 0;
+}
static void
eth_dev_info(struct rte_eth_dev *dev,
@@ -456,6 +461,12 @@ eth_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+static void
+eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused struct ether_addr *addr)
+{
+}
+
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
.dev_stop = eth_dev_stop,
@@ -465,7 +476,9 @@ static const struct eth_dev_ops ops = {
.tx_queue_setup = eth_tx_queue_setup,
.rx_queue_release = eth_queue_release,
.tx_queue_release = eth_queue_release,
+ .mtu_set = eth_mtu_set,
.link_update = eth_link_update,
+ .mac_addr_set = eth_mac_address_set,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
.reta_update = eth_rss_reta_update,
diff --git a/drivers/net/octeontx/Makefile b/drivers/net/octeontx/Makefile
index 9c27fdfe..3e4a1066 100644
--- a/drivers/net/octeontx/Makefile
+++ b/drivers/net/octeontx/Makefile
@@ -1,33 +1,7 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Cavium Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Cavium Networks nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
#
+
include $(RTE_SDK)/mk/rte.vars.mk
#
@@ -69,6 +43,8 @@ else
CFLAGS_octeontx_rxtx.o += -O3 -Ofast
endif
+CFLAGS_octeontx_ethdev.o += -DALLOW_EXPERIMENTAL_API
+
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_mempool_octeontx
diff --git a/drivers/net/octeontx/base/meson.build b/drivers/net/octeontx/base/meson.build
new file mode 100644
index 00000000..09f657ab
--- /dev/null
+++ b/drivers/net/octeontx/base/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = [
+ 'octeontx_pkovf.c',
+ 'octeontx_pkivf.c',
+ 'octeontx_bgx.c'
+]
+
+depends = ['ethdev', 'mempool_octeontx']
+static_objs = []
+foreach d: depends
+ static_objs += [get_variable('static_rte_' + d)]
+endforeach
+
+base_lib = static_library('octeontx_base', sources,
+ c_args: cflags,
+ dependencies: static_objs,
+)
+
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/net/octeontx/base/octeontx_bgx.c b/drivers/net/octeontx/base/octeontx_bgx.c
index c2d0d433..8576d8ed 100644
--- a/drivers/net/octeontx/base/octeontx_bgx.c
+++ b/drivers/net/octeontx/base/octeontx_bgx.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#include <string.h>
diff --git a/drivers/net/octeontx/base/octeontx_bgx.h b/drivers/net/octeontx/base/octeontx_bgx.h
index f740a1d9..ff265149 100644
--- a/drivers/net/octeontx/base/octeontx_bgx.h
+++ b/drivers/net/octeontx/base/octeontx_bgx.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef __OCTEONTX_BGX_H__
diff --git a/drivers/net/octeontx/base/octeontx_io.h b/drivers/net/octeontx/base/octeontx_io.h
index ec4ce1dc..d51ded23 100644
--- a/drivers/net/octeontx/base/octeontx_io.h
+++ b/drivers/net/octeontx/base/octeontx_io.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef __OCTEONTX_IO_H__
diff --git a/drivers/net/octeontx/base/octeontx_pki_var.h b/drivers/net/octeontx/base/octeontx_pki_var.h
index def6cbb9..c793b655 100644
--- a/drivers/net/octeontx/base/octeontx_pki_var.h
+++ b/drivers/net/octeontx/base/octeontx_pki_var.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef __OCTEONTX_PKI_VAR_H__
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c
index b97f05cd..58a7f110 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.c
+++ b/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -1,34 +1,7 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
+
#include <string.h>
#include <rte_eal.h>
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index 004dedcc..d036054c 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef __OCTEONTX_PKI_H__
diff --git a/drivers/net/octeontx/base/octeontx_pkovf.c b/drivers/net/octeontx/base/octeontx_pkovf.c
index f01d948e..0a6d64b8 100644
--- a/drivers/net/octeontx/base/octeontx_pkovf.c
+++ b/drivers/net/octeontx/base/octeontx_pkovf.c
@@ -1,34 +1,7 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
+
#include <stdbool.h>
#include <string.h>
#include <stdio.h>
diff --git a/drivers/net/octeontx/base/octeontx_pkovf.h b/drivers/net/octeontx/base/octeontx_pkovf.h
index cfc3715d..cbd28249 100644
--- a/drivers/net/octeontx/base/octeontx_pkovf.h
+++ b/drivers/net/octeontx/base/octeontx_pkovf.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef __OCTEONTX_PKO_H__
diff --git a/drivers/net/octeontx/meson.build b/drivers/net/octeontx/meson.build
new file mode 100644
index 00000000..0e249eb9
--- /dev/null
+++ b/drivers/net/octeontx/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+subdir('base')
+objs = [base_objs]
+
+sources = files('octeontx_rxtx.c',
+ 'octeontx_ethdev.c'
+ )
+
+allow_experimental_apis = true
+
+deps += ['mempool_octeontx', 'eventdev']
+
+includes += include_directories('base')
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index bd24ec33..b739c0b3 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -1,34 +1,7 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
+
#include <stdio.h>
#include <stdarg.h>
#include <stdbool.h>
@@ -43,6 +16,7 @@
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
#include <rte_prefetch.h>
#include <rte_bus_vdev.h>
@@ -54,6 +28,9 @@ struct octeontx_vdev_init_params {
uint8_t nr_port;
};
+uint16_t
+rte_octeontx_pchan_map[OCTEONTX_MAX_BGX_PORTS][OCTEONTX_MAX_LMAC_PER_BGX];
+
enum octeontx_link_speed {
OCTEONTX_LINK_SPEED_SGMII,
OCTEONTX_LINK_SPEED_XAUI,
@@ -65,6 +42,27 @@ enum octeontx_link_speed {
OCTEONTX_LINK_SPEED_RESERVE2
};
+int otx_net_logtype_mbox;
+int otx_net_logtype_init;
+int otx_net_logtype_driver;
+
+RTE_INIT(otx_net_init_log);
+static void
+otx_net_init_log(void)
+{
+ otx_net_logtype_mbox = rte_log_register("pmd.net.octeontx.mbox");
+ if (otx_net_logtype_mbox >= 0)
+ rte_log_set_level(otx_net_logtype_mbox, RTE_LOG_NOTICE);
+
+ otx_net_logtype_init = rte_log_register("pmd.net.octeontx.init");
+ if (otx_net_logtype_init >= 0)
+ rte_log_set_level(otx_net_logtype_init, RTE_LOG_NOTICE);
+
+ otx_net_logtype_driver = rte_log_register("pmd.net.octeontx.driver");
+ if (otx_net_logtype_driver >= 0)
+ rte_log_set_level(otx_net_logtype_driver, RTE_LOG_NOTICE);
+}
+
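The three log types registered above are dynamic: once rte_log_register() has returned an id, the level can be raised or lowered at runtime without rebuilding the PMD. A minimal sketch, assuming code that includes octeontx_logs.h (where the logtype externs are declared); the helper name is hypothetical:

    #include <rte_log.h>

    /* hypothetical helper: turn on verbose driver logging at runtime */
    static void
    otx_enable_debug_logs(void)
    {
            rte_log_set_level(otx_net_logtype_driver, RTE_LOG_DEBUG);
            rte_log_set_level(otx_net_logtype_mbox, RTE_LOG_DEBUG);
    }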
/* Parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
@@ -572,8 +570,8 @@ octeontx_dev_link_update(struct rte_eth_dev *dev,
break;
}
- link.link_duplex = ETH_LINK_AUTONEG;
- link.link_autoneg = ETH_LINK_SPEED_AUTONEG;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
return octeontx_atomic_write_link_status(dev, &link);
}
@@ -998,6 +996,17 @@ octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
+static int
+octeontx_pool_ops(struct rte_eth_dev *dev, const char *pool)
+{
+ RTE_SET_USED(dev);
+
+ if (!strcmp(pool, "octeontx_fpavf"))
+ return 0;
+
+ return -ENOTSUP;
+}
+
/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops octeontx_dev_ops = {
.dev_configure = octeontx_dev_configure,
@@ -1018,6 +1027,7 @@ static const struct eth_dev_ops octeontx_dev_ops = {
.rx_queue_setup = octeontx_dev_rx_queue_setup,
.rx_queue_release = octeontx_dev_rx_queue_release,
.dev_supported_ptypes_get = octeontx_dev_supported_ptypes_get,
+ .pool_ops_supported = octeontx_pool_ops,
};
/* Create Ethdev interface per BGX LMAC ports */
@@ -1133,6 +1143,9 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
nic->num_tx_queues);
PMD_INIT_LOG(DEBUG, "speed %d mtu %d", nic->speed, nic->mtu);
+ rte_octeontx_pchan_map[(nic->base_ochan >> 8) & 0x7]
+ [(nic->base_ochan >> 4) & 0xF] = data->port_id;
+
return data->port_id;
err:
@@ -1314,6 +1327,7 @@ octeontx_probe(struct rte_vdev_device *dev)
res = -ENOTSUP;
goto parse_error;
}
+ rte_mbuf_set_platform_mempool_ops("octeontx_fpavf");
probe_once = 1;
return 0;
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index c47d4c6d..10e42e14 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -1,41 +1,14 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
+
#ifndef __OCTEONTX_ETHDEV_H__
#define __OCTEONTX_ETHDEV_H__
#include <stdbool.h>
#include <rte_common.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_eventdev.h>
#include <rte_mempool.h>
#include <rte_memory.h>
@@ -52,12 +25,18 @@
#define OCTEONTX_VDEV_NR_PORT_ARG ("nr_port")
#define OCTEONTX_MAX_NAME_LEN 32
+#define OCTEONTX_MAX_BGX_PORTS 4
+#define OCTEONTX_MAX_LMAC_PER_BGX 4
+
static inline struct octeontx_nic *
octeontx_pmd_priv(struct rte_eth_dev *dev)
{
return dev->data->dev_private;
}
+extern uint16_t
+rte_octeontx_pchan_map[OCTEONTX_MAX_BGX_PORTS][OCTEONTX_MAX_LMAC_PER_BGX];
+
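rte_octeontx_pchan_map[][] translates a BGX/LMAC pair, recovered from the base output channel, back to the DPDK port id that octeontx_create() stored: bits [10:8] of base_ochan select the BGX and bits [7:4] the LMAC, matching the indexing in octeontx_ethdev.c. A minimal lookup sketch under that assumption (helper name is hypothetical; assumes this header is included):

    /* hypothetical helper: output channel -> DPDK port id */
    static inline uint16_t
    otx_port_from_ochan(uint32_t ochan)
    {
            return rte_octeontx_pchan_map[(ochan >> 8) & 0x7]
                                         [(ochan >> 4) & 0xF];
    }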
/* Octeontx ethdev nic */
struct octeontx_nic {
struct rte_eth_dev *dev;
diff --git a/drivers/net/octeontx/octeontx_logs.h b/drivers/net/octeontx/octeontx_logs.h
index d5da7331..ccb8a1b0 100644
--- a/drivers/net/octeontx/octeontx_logs.h
+++ b/drivers/net/octeontx/octeontx_logs.h
@@ -1,76 +1,33 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
+
#ifndef __OCTEONTX_LOGS_H__
#define __OCTEONTX_LOGS_H__
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+ rte_log(RTE_LOG_ ## level, otx_net_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
-#ifdef RTE_LIBRTE_OCTEONTX_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, ">>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_OCTEONTX_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_OCTEONTX_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-#ifdef RTE_LIBRTE_OCTEONTX_DEBUG_DRIVER
#define PMD_DRV_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, otx_net_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
-#ifdef RTE_LIBRTE_OCTEONTX_DEBUG_MBOX
#define PMD_MBOX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_MBOX_LOG(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, otx_net_logtype_mbox, \
+ "%s(): " fmt "\n", __func__, ## args)
#define octeontx_log_err(s, ...) PMD_INIT_LOG(ERR, s, ##__VA_ARGS__)
#define octeontx_log_dbg(s, ...) PMD_DRV_LOG(DEBUG, s, ##__VA_ARGS__)
#define octeontx_mbox_log(s, ...) PMD_MBOX_LOG(DEBUG, s, ##__VA_ARGS__)
+#define PMD_RX_LOG PMD_DRV_LOG
+#define PMD_TX_LOG PMD_DRV_LOG
+
+extern int otx_net_logtype_init;
+extern int otx_net_logtype_driver;
+extern int otx_net_logtype_mbox;
+
#endif /* __OCTEONTX_LOGS_H__*/
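With the rte_log()-based definitions above, a call such as PMD_DRV_LOG(INFO, "port %d up", port) now expands (via the RTE_LOG_ ## level token paste) to roughly:

    rte_log(RTE_LOG_INFO, otx_net_logtype_driver,
            "%s(): port %d up\n", __func__, port);

so every message is tagged with the calling function and filtered against the per-logtype level instead of the compile-time RTE_LIBRTE_OCTEONTX_DEBUG_* switches that were removed.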
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index c97d5b35..2502d90e 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#include <stdint.h>
@@ -37,7 +9,7 @@
#include <rte_atomic.h>
#include <rte_common.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_log.h>
#include <rte_mbuf.h>
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index 382ff2b2..fe3e5ccd 100644
--- a/drivers/net/octeontx/octeontx_rxtx.h
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -1,39 +1,11 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium Inc. 2017. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium networks nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
*/
#ifndef __OCTEONTX_RXTX_H__
#define __OCTEONTX_RXTX_H__
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#ifndef __hot
#define __hot __attribute__((hot))
diff --git a/drivers/net/octeontx/rte_pmd_octeontx_version.map b/drivers/net/octeontx/rte_pmd_octeontx_version.map
index a70bd197..a3161b14 100644
--- a/drivers/net/octeontx/rte_pmd_octeontx_version.map
+++ b/drivers/net/octeontx/rte_pmd_octeontx_version.map
@@ -2,3 +2,10 @@ DPDK_17.11 {
local: *;
};
+
+DPDK_18.02 {
+ global:
+
+ rte_octeontx_pchan_map;
+
+} DPDK_17.11;
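Exporting rte_octeontx_pchan_map under the new DPDK_18.02 node makes it visible to other octeontx components that link against this PMD (the event device is already listed as a dependency in meson.build above); with "local: *;" in the DPDK_17.11 node, only symbols named in a "global:" section are visible outside the shared object.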
diff --git a/drivers/net/pcap/Makefile b/drivers/net/pcap/Makefile
index b6487d42..ef332162 100644
--- a/drivers/net/pcap/Makefile
+++ b/drivers/net/pcap/Makefile
@@ -1,34 +1,7 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright(c) 2014 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright(c) 2014 6WIND S.A.
+# All rights reserved.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/pcap/meson.build b/drivers/net/pcap/meson.build
new file mode 100644
index 00000000..8b81214e
--- /dev/null
+++ b/drivers/net/pcap/meson.build
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+if meson.is_cross_build()
+ pcap_dep = cc.find_library('pcap', required: false)
+ if pcap_dep.found()
+ ext_deps += pcap_dep
+ else
+ build = false
+ endif
+else
+ pcap_dep = dependency('pcap', required: false)
+ if pcap_dep.found()
+ ext_deps += pcap_dep
+ elif find_program('pcap-config', required: false).found()
+ ext_deps += cc.find_library('pcap')
+ else
+ build = false
+ endif
+endif
+sources = files('rte_eth_pcap.c')
+pkgconfig_extra_libs += '-lpcap'
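The detection order here appears deliberate: pkg-config metadata (dependency('pcap')) is preferred, a pcap-config script is accepted as a hint that plain -lpcap will link, and cross builds go straight to cc.find_library() since host-side helper scripts cannot be trusted for the target; if none of these succeed, the PMD is skipped (build = false) rather than failing the configure step.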
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 5a86752f..c1571e1f 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation.
+ * Copyright(c) 2014 6WIND S.A.
+ * All rights reserved.
*/
#include <time.h>
@@ -39,7 +11,7 @@
#include <pcap.h>
#include <rte_cycles.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
@@ -124,7 +96,7 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_SPEED_FIXED,
+ .link_autoneg = ETH_LINK_AUTONEG,
};
static int
@@ -806,7 +778,7 @@ pmd_init_internals(struct rte_vdev_device *vdev,
const char *name;
name = rte_vdev_device_name(vdev);
- RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %u\n",
+ RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %d\n",
numa_node);
/* now do all data allocation - for eth_dev structure
@@ -1036,7 +1008,7 @@ pmd_pcap_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
- RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %u\n",
+ RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %d\n",
rte_socket_id());
if (!dev)
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
index ba272a91..d8f69ad6 100644
--- a/drivers/net/qede/base/ecore_chain.h
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -128,7 +128,7 @@ struct ecore_chain {
} pbl_sp;
/* Address of first page of the chain - the address is required
- * for fastpath operation [consume/produce] but only for the the SINGLE
+ * for fastpath operation [consume/produce] but only for the SINGLE
* flavour which isn't considered fastpath [== SPQ].
*/
void *p_virt_addr;
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 632297a7..21ddda92 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -216,10 +216,9 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
*type = DCBX_PROTOCOL_ETH;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
- DP_ERR(p_hwfn,
- "No action required, App TLV id = 0x%x"
- " app_prio_bitmap = 0x%x\n",
- id, app_prio_bitmap);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "No action required, App TLV entry = 0x%x\n",
+ app_prio_bitmap);
return false;
}
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index da1830ce..744d2043 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -3182,7 +3182,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
* resources allocation queries should be atomic. Since several PFs can
* run in parallel - a resource lock is needed.
* If either the resource lock or resource set value commands are not
- * supported - skip the the max values setting, release the lock if
+ * supported - skip the max values setting, release the lock if
* needed, and proceed to the queries. Other failures, including a
* failure to acquire the lock, will cause this function to fail.
* Old drivers that don't acquire the lock can run in parallel, and
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index be3e91f0..225890e2 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -552,7 +552,7 @@ struct ecore_mcp_link_capabilities
*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn);
/**
- * @brief Request the MFW to set the the link according to 'link_input'.
+ * @brief Request the MFW to set the link according to 'link_input'.
*
* @param p_hwfn
* @param p_ptt
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index 66622323..abca7408 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -33,7 +33,7 @@ struct ecore_eth_pf_params {
u32 num_arfs_filters;
};
-/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */
+/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct ecore_iscsi_pf_params {
u64 glbl_q_params_addr;
u64 bdq_pbl_base_addr[2];
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 25109dbd..e0f2dd5a 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -1385,6 +1385,12 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
if (sge_tpa_params->tpa_gro_consistent_flg)
p_sge_tpa_tlv->sge_tpa_flags |=
VFPF_TPA_GRO_CONSIST_FLAG;
+ if (sge_tpa_params->tpa_ipv4_tunn_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_TUNN_IPV4_EN_FLAG;
+ if (sge_tpa_params->tpa_ipv6_tunn_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_TUNN_IPV6_EN_FLAG;
p_sge_tpa_tlv->tpa_max_aggs_num =
sge_tpa_params->tpa_max_aggs_num;
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 3ccc7665..ecb00649 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -424,6 +424,8 @@ struct vfpf_vport_update_sge_tpa_tlv {
#define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
#define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4)
+ #define VFPF_TPA_TUNN_IPV4_EN_FLAG (1 << 5)
+ #define VFPF_TPA_TUNN_IPV6_EN_FLAG (1 << 6)
u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 6f5ba2a9..a91f4368 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -9,13 +9,17 @@
#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>
+#include <rte_kvargs.h>
/* Globals */
+int qede_logtype_init;
+int qede_logtype_driver;
+
static const struct qed_eth_ops *qed_ops;
static int64_t timer_period = 1;
/* VXLAN tunnel classification mapping */
-const struct _qede_vxlan_tunn_types {
+const struct _qede_udp_tunn_types {
uint16_t rte_filter_type;
enum ecore_filter_ucast_type qede_type;
enum ecore_tunn_clss qede_tunn_clss;
@@ -353,7 +357,6 @@ qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
qdev->ops = qed_ops;
}
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
static void qede_print_adapter_info(struct qede_dev *qdev)
{
struct ecore_dev *edev = &qdev->edev;
@@ -383,56 +386,115 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
DP_INFO(edev, " Firmware file : %s\n", fw_file);
DP_INFO(edev, "*********************************\n");
}
-#endif
-static int
-qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_sp_vport_start_params params;
+ unsigned int i = 0, j = 0, qid;
+ unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+ struct qede_tx_queue *txq;
+
+ DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
+
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+ for_each_rss(qid) {
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rcv_pkts), 0,
+ sizeof(uint64_t));
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+ sizeof(uint64_t));
+ OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+ sizeof(uint64_t));
+
+ if (xstats)
+ for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
+ OSAL_MEMSET((((char *)
+ (qdev->fp_array[qid].rxq)) +
+ qede_rxq_xstats_strings[j].offset),
+ 0,
+ sizeof(uint64_t));
+
+ i++;
+ if (i == rxq_stat_cntrs)
+ break;
+ }
+
+ i = 0;
+
+ for_each_tss(qid) {
+ txq = qdev->fp_array[qid].txq;
+
+ OSAL_MEMSET((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue, xmit_pkts)), 0,
+ sizeof(uint64_t));
+
+ i++;
+ if (i == txq_stat_cntrs)
+ break;
+ }
+}
+
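qede_reset_queue_stats() zeroes each counter in place with OSAL_MEMSET at a field offset, which lets the xstats loop walk qede_rxq_xstats_strings[] and reuse the same offsets the read path uses. The core idiom, reduced to a hedged standalone sketch (the struct and field names here are illustrative, not the driver's):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct q_stats {
            uint64_t rcv_pkts;
            uint64_t rx_hw_errors;
    };

    /* zero one 64-bit counter given its byte offset into the struct */
    static void
    clear_counter(struct q_stats *s, size_t off)
    {
            memset((char *)s + off, 0, sizeof(uint64_t));
    }

    /* usage: clear_counter(st, offsetof(struct q_stats, rcv_pkts)); */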
+static int
+qede_stop_vport(struct ecore_dev *edev)
+{
struct ecore_hwfn *p_hwfn;
+ uint8_t vport_id;
int rc;
int i;
- memset(&params, 0, sizeof(params));
- params.vport_id = 0;
- params.mtu = mtu;
- /* @DPDK - Disable FW placement */
- params.zero_placement_offset = 1;
+ vport_id = 0;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- params.concrete_fid = p_hwfn->hw_info.concrete_fid;
- params.opaque_fid = p_hwfn->hw_info.opaque_fid;
- rc = ecore_sp_vport_start(p_hwfn, &params);
+ rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
+ vport_id);
if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
return rc;
}
}
- ecore_reset_vport_stats(edev);
- DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
+
+ DP_INFO(edev, "vport stopped\n");
return 0;
}
static int
-qede_stop_vport(struct ecore_dev *edev)
+qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_start_params params;
struct ecore_hwfn *p_hwfn;
- uint8_t vport_id;
int rc;
int i;
- vport_id = 0;
+ if (qdev->vport_started)
+ qede_stop_vport(edev);
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ /* @DPDK - Disable FW placement */
+ params.zero_placement_offset = 1;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
- vport_id);
+ params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_start(p_hwfn, &params);
if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
return rc;
}
}
+ ecore_reset_vport_stats(edev);
+ qdev->vport_started = true;
+ DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
return 0;
}
@@ -453,13 +515,13 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
params.update_vport_active_tx_flg = 1;
params.vport_active_rx_flg = flg;
params.vport_active_tx_flg = flg;
-#ifndef RTE_LIBRTE_QEDE_VF_TX_SWITCH
- if (IS_VF(edev)) {
- params.update_tx_switching_flg = 1;
- params.tx_switching_flg = !flg;
- DP_INFO(edev, "VF tx-switching is disabled\n");
+ if (!qdev->enable_tx_switching) {
+ if (IS_VF(edev)) {
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = !flg;
+ DP_INFO(edev, "VF tx-switching is disabled\n");
+ }
}
-#endif
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
params.opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -482,8 +544,8 @@ qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
/* Enable LRO in split mode */
sge_tpa_params->tpa_ipv4_en_flg = enable;
sge_tpa_params->tpa_ipv6_en_flg = enable;
- sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
- sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+ sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
+ sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
/* set if tpa enable changes */
sge_tpa_params->update_tpa_en_flg = 1;
/* set if tpa parameters should be handled */
@@ -612,48 +674,127 @@ qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
}
static int
-qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- bool enable, bool mask)
+qede_tunnel_update(struct qede_dev *qdev,
+ struct ecore_tunnel_info *tunn_info)
{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_ptt *p_ptt;
- struct ecore_tunnel_info tunn;
struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
int i;
- memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
- tunn.vxlan.b_update_mode = enable;
- tunn.vxlan.b_mode_enabled = mask;
- tunn.b_update_rx_cls = true;
- tunn.b_update_tx_cls = true;
- tunn.vxlan.tun_cls = clss;
-
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
if (IS_PF(edev)) {
p_ptt = ecore_ptt_acquire(p_hwfn);
- if (!p_ptt)
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Can't acquire PTT\n");
return -EAGAIN;
+ }
} else {
p_ptt = NULL;
}
+
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
- &tunn, ECORE_SPQ_MODE_CB, NULL);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- tunn.vxlan.tun_cls);
- if (IS_PF(edev))
- ecore_ptt_release(p_hwfn, p_ptt);
+ tunn_info, ECORE_SPQ_MODE_CB, NULL);
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ if (rc != ECORE_SUCCESS)
break;
- }
}
+ return rc;
+}
+
+static int
+qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ if (qdev->vxlan.enable == enable)
+ return ECORE_SUCCESS;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.vxlan.b_update_mode = true;
+ tunn.vxlan.b_mode_enabled = enable;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+ tunn.vxlan.tun_cls = clss;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
+
+ rc = qede_tunnel_update(qdev, &tunn);
if (rc == ECORE_SUCCESS) {
qdev->vxlan.enable = enable;
qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
- DP_INFO(edev, "vxlan is %s\n", enable ? "enabled" : "disabled");
+ DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
+ enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ }
+
+ return rc;
+}
+
+static int
+qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.l2_geneve.b_update_mode = true;
+ tunn.l2_geneve.b_mode_enabled = enable;
+ tunn.ip_geneve.b_update_mode = true;
+ tunn.ip_geneve.b_mode_enabled = enable;
+ tunn.l2_geneve.tun_cls = clss;
+ tunn.ip_geneve.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->geneve.enable = enable;
+ qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
+ DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
+ enable ? "enabled" : "disabled", qdev->geneve.udp_port);
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
+static int
+qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ enum rte_eth_tunnel_type tunn_type, bool enable)
+{
+ int rc = -EINVAL;
+
+ switch (tunn_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ rc = qede_vxlan_enable(eth_dev, clss, enable);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ rc = qede_geneve_enable(eth_dev, clss, enable);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
}
return rc;
@@ -1057,6 +1198,8 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
" and classification is based on outer tag only\n");
+ qdev->vlan_offload_mask = mask;
+
DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
@@ -1075,9 +1218,7 @@ static void qede_prandom_bytes(uint32_t *buff)
int qede_config_rss(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-#endif
uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
struct rte_eth_rss_reta_entry64 reta_conf[2];
struct rte_eth_rss_conf rss_conf;
@@ -1132,13 +1273,6 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE(edev);
- /* Update MTU only if it has changed */
- if (qdev->mtu != qdev->new_mtu) {
- if (qede_update_mtu(eth_dev, qdev->new_mtu))
- goto err;
- qdev->mtu = qdev->new_mtu;
- }
-
/* Configure TPA parameters */
if (rxmode->enable_lro) {
if (qede_enable_tpa(eth_dev, true))
@@ -1152,6 +1286,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
if (qede_start_queues(eth_dev))
goto err;
+ if (IS_PF(edev))
+ qede_reset_queue_stats(qdev, true);
+
/* Newer SR-IOV PF driver expects RX/TX queues to be started before
* enabling RSS. Hence RSS configuration is deferred up to this point.
* Also, we would like to retain similar behavior in PF case, so we
@@ -1165,9 +1302,6 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
if (qede_activate_vport(eth_dev, true))
goto err;
- /* Bring-up the link */
- qede_dev_set_link_state(eth_dev, true);
-
/* Update link status */
qede_link_update(eth_dev, 0);
@@ -1202,12 +1336,69 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
/* Disable traffic */
ecore_hw_stop_fastpath(edev); /* TBD - loop */
- /* Bring the link down */
- qede_dev_set_link_state(eth_dev, false);
-
DP_INFO(edev, "Device is stopped\n");
}
+#define QEDE_TX_SWITCHING "vf_txswitch"
+
+const char *valid_args[] = {
+ QEDE_TX_SWITCHING,
+ NULL,
+};
+
+static int qede_args_check(const char *key, const char *val, void *opaque)
+{
+ unsigned long tmp;
+ int ret = 0;
+ struct rte_eth_dev *eth_dev = opaque;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ errno = 0;
+ tmp = strtoul(val, NULL, 0);
+ if (errno) {
+ DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
+ return errno;
+ }
+
+ if (strcmp(QEDE_TX_SWITCHING, key) == 0)
+ qdev->enable_tx_switching = !!tmp;
+
+ return ret;
+}
+
+static int qede_args(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_kvargs *kvlist;
+ struct rte_devargs *devargs;
+ int ret;
+ int i;
+
+ devargs = pci_dev->device.devargs;
+ if (!devargs)
+ return 0; /* return success */
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (kvlist == NULL)
+ return -EINVAL;
+
+ /* Process parameters. */
+ for (i = 0; (valid_args[i] != NULL); ++i) {
+ if (rte_kvargs_count(kvlist, valid_args[i])) {
+ ret = rte_kvargs_process(kvlist, valid_args[i],
+ qede_args_check, eth_dev);
+ if (ret != ECORE_SUCCESS) {
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+ }
+ }
+ rte_kvargs_free(kvlist);
+
+ return 0;
+}
+
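With this in place, the only recognized device argument is vf_txswitch: any non-zero integer keeps VF Tx switching enabled and 0 turns it off. The argument rides on the PCI device specification, so a hypothetical invocation (whitelist syntax as of this release) would pass "-w 0000:05:00.0,vf_txswitch=0" among the EAL arguments; devices probed without arguments keep the default set in qede_dev_configure() below.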
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
@@ -1233,6 +1424,21 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
}
}
+ /* We need at least one RX queue. There is no minimum check in
+ * rte_eth_dev_configure(), so we are checking it here.
+ */
+ if (eth_dev->data->nb_rx_queues == 0) {
+ DP_ERR(edev, "At least one RX queue is required\n");
+ return -EINVAL;
+ }
+
+ /* Enable Tx switching by default */
+ qdev->enable_tx_switching = 1;
+
+ /* Parse devargs and fix up rxmode */
+ if (qede_args(eth_dev))
+ return -ENOTSUP;
+
/* Sanity checks and throw warnings */
if (rxmode->enable_scatter)
eth_dev->data->scattered_rx = 1;
@@ -1254,34 +1460,21 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (qede_check_fdir_support(eth_dev))
return -ENOTSUP;
- /* Deallocate resources if held previously. It is needed only if the
- * queue count has been changed from previous configuration. If its
- * going to change then it means RX/TX queue setup will be called
- * again and the fastpath pointers will be reinitialized there.
- */
- if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
- qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
- qede_dealloc_fp_resc(eth_dev);
- /* Proceed with updated queue count */
- qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
- qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
- if (qede_alloc_fp_resc(qdev))
- return -ENOMEM;
- }
+ qede_dealloc_fp_resc(eth_dev);
+ qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
+ qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+ if (qede_alloc_fp_resc(qdev))
+ return -ENOMEM;
- /* VF's MTU has to be set using vport-start where as
- * PF's MTU can be updated via vport-update.
- */
- if (IS_VF(edev)) {
- if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
- return -1;
- } else {
- if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
- return -1;
- }
+ /* If jumbo enabled adjust MTU */
+ if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+ eth_dev->data->mtu =
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ ETHER_HDR_LEN - ETHER_CRC_LEN;
- qdev->mtu = rxmode->max_rx_pkt_len;
- qdev->new_mtu = qdev->mtu;
+ if (qede_start_vport(qdev, eth_dev->data->mtu))
+ return -1;
+ qdev->mtu = eth_dev->data->mtu;
/* Enable VLAN offloads by default */
ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
@@ -1359,7 +1552,8 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
memset(&link, 0, sizeof(struct qed_link_output));
qdev->ops->common->get_link(edev, &link);
@@ -1494,12 +1688,15 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
qede_dev_stop(eth_dev);
qede_stop_vport(edev);
+ qdev->vport_started = false;
qede_fdir_dealloc_resc(eth_dev);
qede_dealloc_fp_resc(eth_dev);
eth_dev->data->nb_rx_queues = 0;
eth_dev->data->nb_tx_queues = 0;
+ /* Bring the link down */
+ qede_dev_set_link_state(eth_dev, false);
qdev->ops->common->slowpath_stop(edev);
qdev->ops->common->remove(edev);
rte_intr_disable(&pci_dev->intr_handle);
@@ -1733,6 +1930,7 @@ qede_reset_xstats(struct rte_eth_dev *dev)
struct ecore_dev *edev = &qdev->edev;
ecore_reset_vport_stats(edev);
+ qede_reset_queue_stats(qdev, true);
}
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -1768,6 +1966,7 @@ static void qede_reset_stats(struct rte_eth_dev *eth_dev)
struct ecore_dev *edev = &qdev->edev;
ecore_reset_vport_stats(edev);
+ qede_reset_queue_stats(qdev, false);
}
static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
@@ -1865,6 +2064,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
RTE_PTYPE_L4_UDP,
RTE_PTYPE_TUNNEL_VXLAN,
RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_TUNNEL_GENEVE,
/* Inner */
RTE_PTYPE_INNER_L2_ETHER,
RTE_PTYPE_INNER_L2_ETHER_VLAN,
@@ -2159,16 +2359,21 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_dev_info dev_info = {0};
struct qede_fastpath *fp;
+ uint32_t max_rx_pkt_len;
uint32_t frame_size;
uint16_t rx_buf_size;
uint16_t bufsz;
+ bool restart = false;
int i;
PMD_INIT_FUNC_TRACE(edev);
qede_dev_info_get(dev, &dev_info);
- frame_size = mtu + QEDE_ETH_OVERHEAD;
+ max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
- DP_ERR(edev, "MTU %u out of range\n", mtu);
+ DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+ mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+ ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
return -EINVAL;
}
if (!dev->data->scattered_rx &&
@@ -2182,29 +2387,57 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
*/
dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
- qede_dev_stop(dev);
+ if (dev->data->dev_started) {
+ dev->data->dev_started = 0;
+ qede_dev_stop(dev);
+ restart = true;
+ }
rte_delay_ms(1000);
+ qede_start_vport(qdev, mtu); /* Recreate vport */
qdev->mtu = mtu;
+
/* Fix up RX buf size for all queues of the port */
for_each_rss(i) {
fp = &qdev->fp_array[i];
- bufsz = (uint16_t)rte_pktmbuf_data_room_size(
- fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+ if (fp->rxq != NULL) {
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = frame_size;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
+ }
}
- qede_dev_start(dev);
- if (frame_size > ETHER_MAX_LEN)
+ if (max_rx_pkt_len > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
else
dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+ /* Restore config lost due to vport stop */
+ qede_mac_addr_set(dev, &qdev->primary_mac);
+ if (dev->data->promiscuous)
+ qede_promiscuous_enable(dev);
+ else
+ qede_promiscuous_disable(dev);
+
+ if (dev->data->all_multicast)
+ qede_allmulticast_enable(dev);
+ else
+ qede_allmulticast_disable(dev);
+
+ qede_vlan_offload_set(dev, qdev->vlan_offload_mask);
+
+ if (!dev->data->dev_started && restart) {
+ qede_dev_start(dev);
+ dev->data->dev_started = 1;
+ }
+
/* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
/* Reassign back */
dev->rx_pkt_burst = qede_recv_pkts;
dev->tx_pkt_burst = qede_xmit_pkts;
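Because the vport now has to be recreated for the new MTU, an MTU change on a running port briefly swaps in the dummy burst functions, stops and restarts the queues, and restores MAC/promiscuous/VLAN state. From the application side the whole sequence is still one ethdev call; a minimal sketch (the function name and 9000-byte value are illustrative):

    #include <rte_ethdev.h>

    static int
    set_jumbo_mtu(uint16_t port_id)
    {
            /* illustrative value; must stay within dev_info.max_rx_pktlen */
            return rte_eth_dev_set_mtu(port_id, 9000);
    }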
@@ -2213,74 +2446,36 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
}
static int
-qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
- struct rte_eth_udp_tunnel *tunnel_udp,
- bool add)
+qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_tunnel_info tunn; /* @DPDK */
- struct ecore_hwfn *p_hwfn;
- struct ecore_ptt *p_ptt;
uint16_t udp_port;
- int rc, i;
+ int rc;
PMD_INIT_FUNC_TRACE(edev);
memset(&tunn, 0, sizeof(tunn));
- if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
- /* Enable VxLAN tunnel if needed before UDP port update using
- * default MAC/VLAN classification.
- */
- if (add) {
- if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
- DP_INFO(edev,
- "UDP port %u was already configured\n",
- tunnel_udp->udp_port);
- return ECORE_SUCCESS;
- }
- /* Enable VXLAN if it was not enabled while adding
- * VXLAN filter.
- */
- if (!qdev->vxlan.enable) {
- rc = qede_vxlan_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, true, true);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to enable VXLAN "
- "prior to updating UDP port\n");
- return rc;
- }
- }
- udp_port = tunnel_udp->udp_port;
- } else {
- if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
- DP_ERR(edev, "UDP port %u doesn't exist\n",
- tunnel_udp->udp_port);
- return ECORE_INVAL;
- }
- udp_port = 0;
+
+ switch (tunnel_udp->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
}
+ udp_port = 0;
tunn.vxlan_port.b_update_port = true;
tunn.vxlan_port.port = udp_port;
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- if (IS_PF(edev)) {
- p_ptt = ecore_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return -EAGAIN;
- } else {
- p_ptt = NULL;
- }
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
- ECORE_SPQ_MODE_CB, NULL);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u\n",
- tunn.vxlan_port.port);
- if (IS_PF(edev))
- ecore_ptt_release(p_hwfn, p_ptt);
- return rc;
- }
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.vxlan_port.port);
+ return rc;
}
qdev->vxlan.udp_port = udp_port;
@@ -2288,26 +2483,145 @@ qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
* VXLAN filters has reached 0 then VXLAN offload can be
* disabled.
*/
- if (!add && qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+ if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
return qede_vxlan_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, false, true);
+ ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+ break;
+
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
+ }
+
+ udp_port = 0;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.geneve_port.port);
+ return rc;
+ }
+
+ qdev->geneve.udp_port = udp_port;
+ /* If the request is to delete the UDP port and the number of
+ * GENEVE filters has reached 0 then GENEVE offload can be
+ * disabled.
+ */
+ if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
+ return qede_geneve_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+ break;
+
+ default:
+ return ECORE_INVAL;
}
return 0;
-}
-static int
-qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
- struct rte_eth_udp_tunnel *tunnel_udp)
-{
- return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
}
-
static int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *tunnel_udp)
{
- return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ uint16_t udp_port;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+
+ switch (tunnel_udp->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u for VXLAN was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+
+ /* Enable VxLAN tunnel with default MAC/VLAN classification if
+ * it was not enabled while adding VXLAN filter before UDP port
+ * update.
+ */
+ if (!qdev->vxlan.enable) {
+ rc = qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable VXLAN "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
+ udp_port);
+ return rc;
+ }
+
+ DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
+
+ qdev->vxlan.udp_port = udp_port;
+ break;
+
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u for GENEVE was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+
+		/* Enable GENEVE tunnel with default MAC/VLAN classification
+		 * if it was not already enabled when a GENEVE filter was
+		 * added ahead of this UDP port update.
+		 */
+ if (!qdev->geneve.enable) {
+ rc = qede_geneve_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable GENEVE "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
+ udp_port);
+ return rc;
+ }
+
+ DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
+
+ qdev->geneve.udp_port = udp_port;
+ break;
+
+ default:
+ return ECORE_INVAL;
+ }
+
+ return 0;
}
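For reference, applications do not call qede_udp_dst_port_add() directly; it is reached through the generic ethdev UDP tunnel API. A minimal sketch, assuming a configured port 0 — the function name and error handling are illustrative, not part of this patch:

#include <rte_ethdev.h>

/* Sketch: register the GENEVE UDP port with the PMD. */
static int
example_add_geneve_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel_udp = {
		.udp_port = 6081,                   /* IANA GENEVE port */
		.prot_type = RTE_TUNNEL_TYPE_GENEVE,
	};

	/* Dispatches to qede_udp_dst_port_add() on qede devices;
	 * rte_eth_dev_udp_tunnel_port_delete() reaches the del path.
	 */
	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
}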
static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
@@ -2374,113 +2688,116 @@ qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
return ECORE_SUCCESS;
}
-static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op,
- const struct rte_eth_tunnel_filter_conf *conf)
+static int
+_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ __attribute__((unused)) enum rte_filter_op filter_op,
+ enum ecore_tunn_clss *clss,
+ bool add)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum ecore_filter_ucast_type type;
- enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
struct ecore_filter_ucast ucast = {0};
- char str[80];
+ enum ecore_filter_ucast_type type;
uint16_t filter_type = 0;
+ char str[80];
int rc;
- PMD_INIT_FUNC_TRACE(edev);
+ filter_type = conf->filter_type;
+ /* Determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, clss, str);
+ if (*clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Unsupported filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
- switch (filter_op) {
- case RTE_ETH_FILTER_ADD:
- if (IS_VF(edev))
- return qede_vxlan_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, true, true);
+ ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
- filter_type = conf->filter_type;
- /* Determine if the given filter classification is supported */
- qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
- if (clss == MAX_ECORE_TUNN_CLSS) {
- DP_ERR(edev, "Unsupported filter type\n");
- return -EINVAL;
- }
- /* Init tunnel ucast params */
- rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
- conf->filter_type);
- return rc;
+ /* Skip MAC/VLAN if filter is based on VNI */
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, add);
+ if ((rc == 0) && add) {
+ /* Enable accept anyvlan */
+ qede_config_accept_any_vlan(qdev, true);
}
- DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
- str, filter_op, ucast.type);
-
- ucast.opcode = ECORE_FILTER_ADD;
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, add);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
- /* Skip MAC/VLAN if filter is based on VNI */
- if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
- rc = qede_mac_int_ops(eth_dev, &ucast, 1);
- if (rc == 0) {
- /* Enable accept anyvlan */
- qede_config_accept_any_vlan(qdev, true);
- }
- } else {
- rc = qede_ucast_filter(eth_dev, &ucast, 1);
- if (rc == 0)
- rc = ecore_filter_ucast_cmd(edev, &ucast,
- ECORE_SPQ_MODE_CB, NULL);
- }
+ return rc;
+}
- if (rc != ECORE_SUCCESS)
- return rc;
+static int
+qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ const struct rte_eth_tunnel_filter_conf *conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
+ bool add;
+ int rc;
- qdev->vxlan.num_filters++;
- qdev->vxlan.filter_type = filter_type;
- if (!qdev->vxlan.enable)
- return qede_vxlan_enable(eth_dev, clss, true, true);
+ PMD_INIT_FUNC_TRACE(edev);
- break;
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
case RTE_ETH_FILTER_DELETE:
- if (IS_VF(edev))
- return qede_vxlan_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, false, true);
+ add = false;
+ break;
+ default:
+ DP_ERR(edev, "Unsupported operation %d\n", filter_op);
+ return -EINVAL;
+ }
- filter_type = conf->filter_type;
- /* Determine if the given filter classification is supported */
- qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
- if (clss == MAX_ECORE_TUNN_CLSS) {
- DP_ERR(edev, "Unsupported filter type\n");
- return -EINVAL;
- }
- /* Init tunnel ucast params */
- rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
- conf->filter_type);
- return rc;
- }
- DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
- str, filter_op, ucast.type);
+ if (IS_VF(edev))
+ return qede_tunn_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ conf->tunnel_type, add);
- ucast.opcode = ECORE_FILTER_REMOVE;
+ rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
+ if (rc != ECORE_SUCCESS)
+ return rc;
- if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
- rc = qede_mac_int_ops(eth_dev, &ucast, 0);
- } else {
- rc = qede_ucast_filter(eth_dev, &ucast, 0);
- if (rc == 0)
- rc = ecore_filter_ucast_cmd(edev, &ucast,
- ECORE_SPQ_MODE_CB, NULL);
+ if (add) {
+ if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
+ qdev->vxlan.num_filters++;
+ qdev->vxlan.filter_type = conf->filter_type;
+ } else { /* GENEVE */
+ qdev->geneve.num_filters++;
+ qdev->geneve.filter_type = conf->filter_type;
}
- if (rc != ECORE_SUCCESS)
- return rc;
- qdev->vxlan.num_filters--;
+ if (!qdev->vxlan.enable || !qdev->geneve.enable)
+ return qede_tunn_enable(eth_dev, clss,
+ conf->tunnel_type,
+ true);
+ } else {
+ if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
+ qdev->vxlan.num_filters--;
+		else /* GENEVE */
+ qdev->geneve.num_filters--;
	/* Disable the tunnel offload if its filter count drops to 0 */
- if (qdev->vxlan.num_filters == 0)
- return qede_vxlan_enable(eth_dev, clss, false, true);
- break;
- default:
- DP_ERR(edev, "Unsupported operation %d\n", filter_op);
- return -EINVAL;
+ if ((qdev->vxlan.num_filters == 0) ||
+ (qdev->geneve.num_filters == 0))
+ return qede_tunn_enable(eth_dev, clss,
+ conf->tunnel_type,
+ false);
}
return 0;
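The reworked qede_tunn_filter_config() is reached through the legacy filter-control API. A minimal sketch of installing an inner-MAC VXLAN filter; the VNI, MAC handling and function name are illustrative, not taken from this patch:

#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>

static int
example_add_vxlan_imac_filter(uint16_t port_id,
			      const struct ether_addr *inner_mac)
{
	struct rte_eth_tunnel_filter_conf conf = {
		.tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
		.filter_type = ETH_TUNNEL_FILTER_IMAC,
		.tenant_id = 100,	/* VNI, illustrative */
	};

	ether_addr_copy(inner_mac, &conf.inner_mac);
	/* RTE_ETH_FILTER_DELETE removes the same filter again */
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
				       RTE_ETH_FILTER_ADD, &conf);
}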
@@ -2500,13 +2817,13 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
case RTE_ETH_FILTER_TUNNEL:
switch (filter_conf->tunnel_type) {
case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_TUNNEL_TYPE_GENEVE:
DP_INFO(edev,
"Packet steering to the specified Rx queue"
- " is not supported with VXLAN tunneling");
- return(qede_vxlan_tunn_config(eth_dev, filter_op,
+ " is not supported with UDP tunneling");
+ return(qede_tunn_filter_config(eth_dev, filter_op,
filter_conf));
		/* Placeholders for future tunneling support */
- case RTE_TUNNEL_TYPE_GENEVE:
case RTE_TUNNEL_TYPE_TEREDO:
case RTE_TUNNEL_TYPE_NVGRE:
case RTE_TUNNEL_TYPE_IP_IN_GRE:
@@ -2790,22 +3107,38 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
if (do_once) {
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
qede_print_adapter_info(adapter);
-#endif
do_once = false;
}
+ /* Bring-up the link */
+ qede_dev_set_link_state(eth_dev, true);
+
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
SLIST_INIT(&adapter->fdir_info.fdir_list_head);
SLIST_INIT(&adapter->vlan_list_head);
SLIST_INIT(&adapter->uc_list_head);
adapter->mtu = ETHER_MTU;
- adapter->new_mtu = ETHER_MTU;
- if (!is_vf)
- if (qede_start_vport(adapter, adapter->mtu))
- return -1;
+ adapter->vport_started = false;
+
+	/* VF tunnel offloads are enabled by default in the PF driver */
+ adapter->vxlan.num_filters = 0;
+ adapter->geneve.num_filters = 0;
+ if (is_vf) {
+ adapter->vxlan.enable = true;
+ adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
+ ETH_TUNNEL_FILTER_IVLAN;
+ adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
+ adapter->geneve.enable = true;
+ adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
+ ETH_TUNNEL_FILTER_IVLAN;
+ adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
+ } else {
+ adapter->vxlan.enable = false;
+ adapter->geneve.enable = false;
+ }
DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
adapter->primary_mac.addr_bytes[0],
@@ -2961,3 +3294,15 @@ RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
+
+RTE_INIT(qede_init_log);
+static void
+qede_init_log(void)
+{
+ qede_logtype_init = rte_log_register("pmd.net.qede.init");
+ if (qede_logtype_init >= 0)
+ rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
+ qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
+ if (qede_logtype_driver >= 0)
+ rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
+}
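With the logtypes now registered dynamically, their level can be adjusted at run time instead of being compiled in. A sketch, assuming rte_log_register() returns the id the PMD already took for an existing name:

#include <rte_log.h>

static void
example_enable_qede_debug(void)
{
	/* Look up (or register) the driver logtype, then raise it */
	int lt = rte_log_register("pmd.net.qede.driver");

	if (lt >= 0)
		rte_log_set_level(lt, RTE_LOG_DEBUG);
}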
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 021de5c0..23f7e0e2 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -13,7 +13,7 @@
#include <sys/queue.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_dev.h>
#include <rte_ip.h>
@@ -45,7 +45,7 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
-#define QEDE_PMD_VERSION_MINOR 6
+#define QEDE_PMD_VERSION_MINOR 7
#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1
@@ -166,11 +166,14 @@ struct qede_fdir_info {
SLIST_HEAD(fdir_list_head, qede_fdir_entry)fdir_list_head;
};
-struct qede_vxlan_tunn {
+/* IANA assigned default UDP ports for encapsulation protocols */
+#define QEDE_VXLAN_DEF_PORT (4789)
+#define QEDE_GENEVE_DEF_PORT (6081)
+
+struct qede_udp_tunn {
bool enable;
uint16_t num_filters;
uint16_t filter_type;
-#define QEDE_VXLAN_DEF_PORT (4789)
uint16_t udp_port;
};
@@ -184,7 +187,7 @@ struct qede_dev {
struct ecore_sb_info *sb_array;
struct qede_fastpath *fp_array;
uint16_t mtu;
- uint16_t new_mtu;
+ bool enable_tx_switching;
bool rss_enable;
struct rte_eth_rss_conf rss_conf;
uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
@@ -202,10 +205,13 @@ struct qede_dev {
SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
uint16_t num_uc_addr;
bool handle_hw_err;
- struct qede_vxlan_tunn vxlan;
+ struct qede_udp_tunn vxlan;
+ struct qede_udp_tunn geneve;
struct qede_fdir_info fdir_info;
bool vlan_strip_flg;
char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
+ bool vport_started;
+ int vlan_offload_mask;
void *ethdev;
};
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
index 15821151..159315e7 100644
--- a/drivers/net/qede/qede_logs.h
+++ b/drivers/net/qede/qede_logs.h
@@ -9,64 +9,55 @@
#ifndef _QEDE_LOGS_H_
#define _QEDE_LOGS_H_
-#define DP_ERR(p_dev, fmt, ...) \
- rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, \
- "[%s:%d(%s)]" fmt, \
- __func__, __LINE__, \
- (p_dev)->name ? (p_dev)->name : "", \
+extern int qede_logtype_driver;
+
+#define DP_ERR(p_dev, fmt, ...) \
+ rte_log(RTE_LOG_ERR, qede_logtype_driver, \
+ "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (p_dev)->name ? (p_dev)->name : "", \
##__VA_ARGS__)
#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
do { \
if (is_assert) \
- rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,\
+ rte_log(RTE_LOG_ERR, qede_logtype_driver,\
"[QEDE PMD: (%s)]%s:" fmt, \
(p_dev)->name ? (p_dev)->name : "", \
__func__, \
##__VA_ARGS__); \
else \
- rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
+ rte_log(RTE_LOG_NOTICE, qede_logtype_driver,\
"[QEDE PMD: (%s)]%s:" fmt, \
(p_dev)->name ? (p_dev)->name : "", \
__func__, \
##__VA_ARGS__); \
} while (0)
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
#define DP_INFO(p_dev, fmt, ...) \
- rte_log(RTE_LOG_INFO, RTE_LOGTYPE_PMD, \
+ rte_log(RTE_LOG_INFO, qede_logtype_driver, \
"[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
(p_dev)->name ? (p_dev)->name : "", \
##__VA_ARGS__)
-#else
-#define DP_INFO(p_dev, fmt, ...) do { } while (0)
-#endif
-#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
-#define DP_VERBOSE(p_dev, module, fmt, ...) \
-do { \
- if ((p_dev)->dp_module & module) \
- rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_PMD, \
- "[%s:%d(%s)]" fmt, \
- __func__, __LINE__, \
- (p_dev)->name ? (p_dev)->name : "", \
- ##__VA_ARGS__); \
-} while (0)
-#else
-#define DP_VERBOSE(p_dev, fmt, ...) do { } while (0)
-#endif
+#define DP_VERBOSE(p_dev, module, fmt, ...) \
+ do { \
+ if ((p_dev)->dp_module & module) \
+ rte_log(RTE_LOG_DEBUG, qede_logtype_driver, \
+ "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ ##__VA_ARGS__); \
+ } while (0)
-#define PMD_INIT_LOG(level, edev, fmt, args...) \
- rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
- "[qede_pmd: %s] %s() " fmt "\n", \
- (edev)->name, __func__, ##args)
+extern int qede_logtype_init;
+#define PMD_INIT_LOG(level, edev, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, qede_logtype_init, \
+ "[qede_pmd: %s] %s() " fmt "\n", \
+ (edev)->name, __func__, ##args)
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE(edev) PMD_INIT_LOG(DEBUG, edev, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE(edev) do { } while (0)
-#endif
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
#define PMD_TX_LOG(level, q, fmt, args...) \
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 01a24e54..0de7c6b8 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -84,7 +84,6 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->port_id = dev->data->port_id;
max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
- qdev->mtu = max_rx_pkt_len;
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
@@ -97,9 +96,10 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
if (dev->data->scattered_rx)
- rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+ rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
else
- rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
+ rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
/* Align to cache-line size if needed */
rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
@@ -158,7 +158,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
qdev->fp_array[queue_idx].rxq = rxq;
DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
- queue_idx, nb_desc, qdev->mtu, socket_id);
+ queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
return 0;
}
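A sketch of the Rx buffer sizing above with an illustrative 2048-byte mempool data room; the driver-private QEDE_* macros are replaced by a plain parameter here:

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

static uint16_t
example_rx_buf_size(uint16_t max_rx_pkt_len, int scattered_rx,
		    uint16_t eth_overhead)
{
	/* data room left after the mbuf headroom */
	uint16_t bufsz = 2048 - RTE_PKTMBUF_HEADROOM;
	uint16_t rx_buf_size;

	if (scattered_rx)	/* size of each segment buffer */
		rx_buf_size = bufsz + ETHER_HDR_LEN + ETHER_CRC_LEN +
			      eth_overhead;
	else			/* whole frame must fit in one buffer */
		rx_buf_size = max_rx_pkt_len + eth_overhead;

	/* align up to a cache-line multiple, as the driver does */
	return RTE_ALIGN_CEIL(rx_buf_size, RTE_CACHE_LINE_SIZE);
}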
@@ -417,6 +417,8 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
fp = &qdev->fp_array[sb_idx];
+ if (!fp)
+ continue;
fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
RTE_CACHE_LINE_SIZE);
if (!fp->sb_info) {
@@ -448,6 +450,8 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
fp = &qdev->fp_array[sb_idx];
+ if (!fp)
+ continue;
DP_INFO(edev, "Free sb_info index 0x%x\n",
fp->sb_info->igu_sb_id);
if (fp->sb_info) {
@@ -812,12 +816,18 @@ void qede_stop_queues(struct rte_eth_dev *eth_dev)
}
}
-static bool qede_tunn_exist(uint16_t flag)
+static inline bool qede_tunn_exist(uint16_t flag)
{
return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
}
+static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
+{
+ return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
+}
+
/*
* qede_check_tunn_csum_l4:
* Returns:
@@ -844,33 +854,51 @@ static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
return 0;
}
-/* Returns outer L3 and L4 packet_type for tunneled packets */
+/* Returns outer L2, L3 and L4 packet_type for tunneled packets */
static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
{
uint32_t packet_type = RTE_PTYPE_UNKNOWN;
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
+ struct vlan_hdr *vlan_hdr;
+ uint16_t ethertype;
+	bool vlan_tagged = false;
+ uint16_t len;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ len = sizeof(struct ether_hdr);
+ ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
+
+ /* Note: Valid only if VLAN stripping is disabled */
+ if (ethertype == ETHER_TYPE_VLAN) {
+ vlan_tagged = 1;
+ vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+ len += sizeof(struct vlan_hdr);
+ ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
+ }
+
+ if (ethertype == ETHER_TYPE_IPv4) {
packet_type |= RTE_PTYPE_L3_IPV4;
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
- sizeof(struct ether_hdr));
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);
if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L4_TCP;
else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
packet_type |= RTE_PTYPE_L4_UDP;
- } else if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+ } else if (ethertype == ETHER_TYPE_IPv6) {
packet_type |= RTE_PTYPE_L3_IPV6;
- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
- sizeof(struct ether_hdr));
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);
if (ipv6_hdr->proto == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L4_TCP;
else if (ipv6_hdr->proto == IPPROTO_UDP)
packet_type |= RTE_PTYPE_L4_UDP;
}
+ if (vlan_tagged)
+ packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+ else
+ packet_type |= RTE_PTYPE_L2_ETHER;
+
return packet_type;
}
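A sketch of consuming the composed packet_type on the application side, using the standard RTE_PTYPE_* masks from the mbuf ptype header:

#include <rte_mbuf.h>

/* Returns non-zero for a VLAN-tagged outer IPv4/UDP frame — the shape
 * a VXLAN or GENEVE carrier packet has when VLAN stripping is disabled.
 */
static int
example_outer_is_vlan_ipv4_udp(const struct rte_mbuf *m)
{
	uint32_t pt = m->packet_type;

	return (pt & RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_VLAN &&
	       (pt & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4 &&
	       (pt & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP;
}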
@@ -1163,17 +1191,17 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
[QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
[QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
- RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_TUNNEL_GENEVE,
[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
- RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_TUNNEL_GRE,
[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_TUNNEL_VXLAN,
[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
- RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_TUNNEL_GENEVE,
[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
- RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_TUNNEL_GRE,
[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
- RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_TUNNEL_VXLAN,
[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
@@ -1253,7 +1281,7 @@ print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
uint8_t bitfield)
{
PMD_RX_LOG(INFO, rxq,
- "len 0x%x bf 0x%x hash_val 0x%x"
+ "len 0x%04x bf 0x%04x hash_val 0x%x"
" ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
" inner_l2=%s inner_l3=%s inner_l4=%s\n",
m->data_len, bitfield, m->hash.rss,
@@ -1404,47 +1432,62 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
ol_flags |= PKT_RX_L4_CKSUM_BAD;
} else {
ol_flags |= PKT_RX_L4_CKSUM_GOOD;
- if (tpa_start_flg)
- flags =
- cqe_start_tpa->tunnel_pars_flags.flags;
- else
- flags = fp_cqe->tunnel_pars_flags.flags;
- tunn_parse_flag = flags;
- /* Tunnel_type */
- packet_type =
- qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
-
- /* Inner header */
- packet_type |=
- qede_rx_cqe_to_pkt_type_inner(parse_flag);
-
- /* Outer L3/L4 types is not available in CQE */
- packet_type |=
- qede_rx_cqe_to_pkt_type_outer(rx_mb);
}
- } else {
- PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
- if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+
+ if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
PMD_RX_LOG(ERR, rxq,
- "L4 csum failed, flags = 0x%x\n",
- parse_flag);
- rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ "Outer L3 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_EIP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
- }
- if (unlikely(qede_check_notunn_csum_l3(rx_mb,
- parse_flag))) {
- PMD_RX_LOG(ERR, rxq,
- "IP csum failed, flags = 0x%x\n",
- parse_flag);
- rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_IP_CKSUM_BAD;
- } else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
- packet_type =
- qede_rx_cqe_to_pkt_type(parse_flag);
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
}
+
+ if (tpa_start_flg)
+ flags = cqe_start_tpa->tunnel_pars_flags.flags;
+ else
+ flags = fp_cqe->tunnel_pars_flags.flags;
+ tunn_parse_flag = flags;
+
+ /* Tunnel_type */
+ packet_type =
+ qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
+
+ /* Inner header */
+ packet_type |=
+ qede_rx_cqe_to_pkt_type_inner(parse_flag);
+
+			/* Outer L3/L4 types are not available in the CQE.
+			 * Add the headroom offset so the outer headers
+			 * parse correctly.
+			 */
+			rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+			packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+ }
+
+ /* Common handling for non-tunnel packets and for inner
+		 * headers in the case of tunneled packets.
+ */
+ if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "L4 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
+ PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
}
if (CQE_HAS_VLAN(parse_flag) ||
@@ -1549,7 +1592,8 @@ next_cqe:
/* Populate scatter gather buffer descriptor fields */
static inline uint16_t
qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
- struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3)
+ struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
+ uint16_t start_seg)
{
struct qede_tx_queue *txq = p_txq;
struct eth_tx_bd *tx_bd = NULL;
@@ -1558,7 +1602,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
/* Check for scattered buffers */
while (m_seg) {
- if (nb_segs == 0) {
+ if (start_seg == 0) {
if (!*bd2) {
*bd2 = (struct eth_tx_2nd_bd *)
ecore_chain_produce(&txq->tx_pbl);
@@ -1568,7 +1612,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
mapping = rte_mbuf_data_iova(m_seg);
QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
- } else if (nb_segs == 1) {
+ } else if (start_seg == 1) {
if (!*bd3) {
*bd3 = (struct eth_tx_3rd_bd *)
ecore_chain_produce(&txq->tx_pbl);
@@ -1606,20 +1650,24 @@ print_tx_bd_info(struct qede_tx_queue *txq,
if (bd1)
PMD_TX_LOG(INFO, txq,
- "BD1: nbytes=%u nbds=%u bd_flags=%04x bf=%04x",
- rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
- bd1->data.bd_flags.bitfields,
- rte_cpu_to_le_16(bd1->data.bitfields));
+ "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
+ rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
+ bd1->data.bd_flags.bitfields,
+ rte_cpu_to_le_16(bd1->data.bitfields));
if (bd2)
PMD_TX_LOG(INFO, txq,
- "BD2: nbytes=%u bf=%04x\n",
- rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1);
+ "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
+ rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
+ bd2->data.bitfields2, bd2->data.tunn_ip_size);
if (bd3)
PMD_TX_LOG(INFO, txq,
- "BD3: nbytes=%u bf=%04x mss=%u\n",
- rte_cpu_to_le_16(bd3->nbytes),
- rte_cpu_to_le_16(bd3->data.bitfields),
- rte_cpu_to_le_16(bd3->data.lso_mss));
+ "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
+ "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
+ rte_cpu_to_le_16(bd3->nbytes),
+ rte_cpu_to_le_16(bd3->data.bitfields),
+ rte_cpu_to_le_16(bd3->data.lso_mss),
+ bd3->data.tunn_l4_hdr_start_offset_w,
+ bd3->data.tunn_hdr_size_w);
rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
@@ -1792,7 +1840,9 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
PKT_TX_TUNNEL_VXLAN) ||
((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
- PKT_TX_TUNNEL_MPLSINUDP)) {
+ PKT_TX_TUNNEL_MPLSINUDP) ||
+ ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
+ PKT_TX_TUNNEL_GENEVE)) {
/* Check against max which is Tunnel IPv6 + ext */
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
@@ -1897,6 +1947,10 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* and BD2 onwards for data.
*/
hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ if (tunn_flg)
+ hdr_size += mbuf->outer_l2_len +
+ mbuf->outer_l3_len;
+
bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
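A sketch of the LSO header-size arithmetic introduced above, pulled out of the transmit loop; the mbuf length fields follow the usual DPDK tunnel TSO convention, and the function name is illustrative:

#include <rte_mbuf.h>

static uint16_t
example_lso_hdr_size(const struct rte_mbuf *m, int tunneled)
{
	/* header bytes carried in BD1 ahead of the payload */
	uint16_t hdr_size = m->l2_len + m->l3_len + m->l4_len;

	if (tunneled)
		hdr_size += m->outer_l2_len + m->outer_l3_len;

	return hdr_size;
}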
@@ -2013,9 +2067,11 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Handle fragmented MBUF */
m_seg = mbuf->next;
+
/* Encode scatter gather buffer descriptors if required */
- nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3);
+ nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
bd1->data.nbds = nbds + nb_frags;
+
txq->nb_tx_avail -= bd1->data.nbds;
txq->sw_tx_prod++;
rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
@@ -2023,7 +2079,6 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
- PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d", lso_flg, tunn_flg);
#endif
nb_pkt_sent++;
txq->xmit_pkts++;
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index acf9e475..f1d36661 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -64,7 +64,7 @@
#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
~(QEDE_FW_RX_ALIGN_END - 1))
/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
-#define QEDE_ETH_OVERHEAD ((ETHER_HDR_LEN) + ((2 * QEDE_VLAN_TAG_SIZE)) \
+#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
+ (QEDE_LLC_SNAP_HDR_LEN))
#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
@@ -73,7 +73,8 @@
ETH_RSS_IPV6 |\
ETH_RSS_NONFRAG_IPV6_TCP |\
ETH_RSS_NONFRAG_IPV6_UDP |\
- ETH_RSS_VXLAN)
+ ETH_RSS_VXLAN |\
+ ETH_RSS_GENEVE)
#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
@@ -151,6 +152,7 @@
PKT_TX_QINQ_PKT | \
PKT_TX_VLAN_PKT | \
PKT_TX_TUNNEL_VXLAN | \
+ PKT_TX_TUNNEL_GENEVE | \
PKT_TX_TUNNEL_MPLSINUDP)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
diff --git a/drivers/net/ring/Makefile b/drivers/net/ring/Makefile
index 085ffa57..517312e0 100644
--- a/drivers/net/ring/Makefile
+++ b/drivers/net/ring/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/ring/meson.build b/drivers/net/ring/meson.build
new file mode 100644
index 00000000..e877a4b4
--- /dev/null
+++ b/drivers/net/ring/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+sources = files('rte_eth_ring.c')
+install_headers('rte_eth_ring.h')
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index a73c631f..df13c44b 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -1,39 +1,10 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include "rte_eth_ring.h"
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -89,7 +60,7 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_SPEED_AUTONEG
+ .link_autoneg = ETH_LINK_AUTONEG
};
static uint16_t
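For context, the ring PMD wraps plain rte_ring objects into an ethdev port. A minimal sketch with illustrative names and sizes:

#include <rte_ring.h>
#include <rte_eth_ring.h>

static int
example_make_ring_port(void)
{
	/* one ring serves as both the Rx and the Tx queue here */
	struct rte_ring *r = rte_ring_create("ex_ring", 1024, 0,
					     RING_F_SP_ENQ | RING_F_SC_DEQ);

	if (r == NULL)
		return -1;
	/* returns the new port id, or -1 on failure */
	return rte_eth_from_rings("net_ring_ex", &r, 1, &r, 1, 0);
}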
diff --git a/drivers/net/ring/rte_eth_ring.h b/drivers/net/ring/rte_eth_ring.h
index 4ff83eca..59e074d0 100644
--- a/drivers/net/ring/rte_eth_ring.h
+++ b/drivers/net/ring/rte_eth_ring.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _RTE_ETH_RING_H_
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 2cfd62a2..8a671dd2 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,32 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
#
-# BSD LICENSE
-#
-# Copyright (c) 2016-2017 Solarflare Communications Inc.
+# Copyright (c) 2016-2018 Solarflare Communications Inc.
# All rights reserved.
#
# This software was jointly developed between OKTET Labs (under contract
# for Solarflare) and Solarflare Communications, Inc.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# 1. Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -35,6 +13,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_sfc_efx.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -I$(SRCDIR)/base/
CFLAGS += -I$(SRCDIR)
CFLAGS += -O3
@@ -81,7 +60,7 @@ BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.
$(foreach obj, $(BASE_DRIVER_OBJS), \
$(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
-EXPORT_MAP := rte_pmd_sfc_efx_version.map
+EXPORT_MAP := rte_pmd_sfc_version.map
LIBABIVER := 1
@@ -122,6 +101,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_phy.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_port.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_rx.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_sram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_tunnel.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_tx.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_vpd.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += mcdi_mon.c
diff --git a/drivers/net/sfc/base/README b/drivers/net/sfc/base/README
index 9019e8ba..685c502c 100644
--- a/drivers/net/sfc/base/README
+++ b/drivers/net/sfc/base/README
@@ -1,28 +1,8 @@
+ SPDX-License-Identifier: BSD-3-Clause
- Copyright (c) 2006-2016 Solarflare Communications Inc.
+ Copyright (c) 2006-2018 Solarflare Communications Inc.
All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
Solarflare libefx driver library
================================
diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c
index d9389dab..05700c5c 100644
--- a/drivers/net/sfc/base/ef10_ev.c
+++ b/drivers/net/sfc/base/ef10_ev.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -463,7 +439,7 @@ ef10_ev_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
__in uint32_t us,
__in uint32_t flags,
@@ -477,7 +453,8 @@ ef10_ev_qcreate(
EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
- if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+ if (!ISP2(ndescs) ||
+ (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
rc = EINVAL;
goto fail1;
}
@@ -526,7 +503,8 @@ ef10_ev_qcreate(
* it will choose the best settings for low latency, otherwise
* it will choose the best settings for throughput.
*/
- rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
+ rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us,
+ flags);
if (rc != 0)
goto fail4;
} else {
@@ -542,7 +520,7 @@ ef10_ev_qcreate(
* to choose it.)
*/
boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
- rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
+ rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
low_latency);
if (rc != 0)
goto fail5;
@@ -573,7 +551,7 @@ ef10_ev_qdestroy(
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD);
- (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
+ (void) efx_mcdi_fini_evq(enp, eep->ee_index);
}
__checkReturn efx_rc_t
@@ -774,7 +752,7 @@ ef10_ev_rx_packed_stream(
__in_opt void *arg)
{
uint32_t label;
- uint32_t next_read_lbits;
+ uint32_t pkt_count_lbits;
uint16_t flags;
boolean_t should_abort;
efx_evq_rxq_state_t *eersp;
@@ -782,23 +760,28 @@ ef10_ev_rx_packed_stream(
unsigned int current_id;
boolean_t new_buffer;
- next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+ pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);
flags = 0;
eersp = &eep->ee_rxq_state[label];
- pkt_count = (EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS) + 1 +
- next_read_lbits - eersp->eers_rx_stream_npackets) &
+
+ /*
+	 * RX_DSC_PTR_LBITS holds the least significant bits of the
+	 * global (not per-buffer) packet counter. The maximum number
+	 * of completed packets is guaranteed to fit in the lbits
+	 * mask, so modulo-mask arithmetic gives the packet counter
+	 * increment.
+ */
+ pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
eersp->eers_rx_stream_npackets += pkt_count;
if (new_buffer) {
flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
- if (eersp->eers_rx_packed_stream_credits <
- EFX_RX_PACKED_STREAM_MAX_CREDITS)
- eersp->eers_rx_packed_stream_credits++;
+ eersp->eers_rx_packed_stream_credits++;
eersp->eers_rx_read_ptr++;
}
current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
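The masked subtraction in this packed-stream handler is the usual wrap-safe counter delta; a distilled sketch, with an illustrative function name:

#include <stdint.h>

/* With an N-bit hardware counter field, (new - old) & mask recovers
 * the increment as long as fewer than 2^N packets completed since the
 * last event; here the mask is EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS).
 */
static uint32_t
example_lbits_delta(uint32_t new_lbits, uint32_t old_total, uint32_t mask)
{
	return (new_lbits - old_total) & mask;
}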
@@ -1336,10 +1319,14 @@ ef10_ev_rxlabel_init(
__in efx_evq_t *eep,
__in efx_rxq_t *erp,
__in unsigned int label,
- __in boolean_t packed_stream)
+ __in efx_rxq_type_t type)
{
efx_evq_rxq_state_t *eersp;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
+#endif
+ _NOTE(ARGUNUSED(type))
EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
eersp = &eep->ee_rxq_state[label];
@@ -1363,7 +1350,7 @@ ef10_ev_rxlabel_init(
eersp->eers_rx_packed_stream = packed_stream;
if (packed_stream) {
eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
- (EFX_RX_PACKED_STREAM_MEM_PER_CREDIT /
+ EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
/*
@@ -1377,8 +1364,6 @@ ef10_ev_rxlabel_init(
EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
EFX_RX_PACKED_STREAM_MAX_CREDITS);
}
-#else
- EFSYS_ASSERT(!packed_stream);
#endif
}
diff --git a/drivers/net/sfc/base/ef10_filter.c b/drivers/net/sfc/base/ef10_filter.c
index e1faf1dd..27b59987 100644
--- a/drivers/net/sfc/base/ef10_filter.c
+++ b/drivers/net/sfc/base/ef10_filter.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -995,7 +971,7 @@ ef10_filter_supported_filters(
size_t list_length;
uint32_t i;
efx_rc_t rc;
- uint32_t all_filter_flags =
+ efx_filter_match_flags_t all_filter_flags =
(EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST |
EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT |
EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT |
@@ -1243,8 +1219,8 @@ typedef struct ef10_filter_encap_entry_s {
uint32_t inner_frame_match;
} ef10_filter_encap_entry_t;
-#define EF10_ENCAP_FILTER_ENTRY(ipv, encap_type, inner_frame_match) \
- { EFX_ETHER_TYPE_##ipv, EFX_TUNNEL_PROTOCOL_##encap_type, \
+#define EF10_ENCAP_FILTER_ENTRY(ipv, encap_type, inner_frame_match) \
+ { EFX_ETHER_TYPE_##ipv, EFX_TUNNEL_PROTOCOL_##encap_type, \
EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_##inner_frame_match }
static ef10_filter_encap_entry_t ef10_filter_encap_list[] = {
@@ -1305,8 +1281,8 @@ ef10_filter_insert_encap_filters(
*/
if ((mulcst == B_FALSE) &&
(encap_filter->inner_frame_match ==
- EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST))
- continue;
+ EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST))
+ continue;
efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags,
diff --git a/drivers/net/sfc/base/ef10_impl.h b/drivers/net/sfc/base/ef10_impl.h
index 8f9eb7a3..e79f4d53 100644
--- a/drivers/net/sfc/base/ef10_impl.h
+++ b/drivers/net/sfc/base/ef10_impl.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2015-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_EF10_IMPL_H
@@ -50,8 +26,9 @@ extern "C" {
*/
#define EF10_NVRAM_CHUNK 0x80
-/* Alignment requirement for value written to RX WPTR:
- * the WPTR must be aligned to an 8 descriptor boundary
+/*
+ * Alignment requirement for value written to RX WPTR: the WPTR must be aligned
+ * to an 8 descriptor boundary.
*/
#define EF10_RX_WPTR_ALIGN 8
@@ -80,7 +57,7 @@ ef10_ev_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
__in uint32_t us,
__in uint32_t flags,
@@ -117,7 +94,7 @@ ef10_ev_rxlabel_init(
__in efx_evq_t *eep,
__in efx_rxq_t *erp,
__in unsigned int label,
- __in boolean_t packed_stream);
+ __in efx_rxq_type_t type);
void
ef10_ev_rxlabel_fini(
@@ -443,6 +420,14 @@ ef10_nvram_partn_read(
__in size_t size);
extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_backup(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
ef10_nvram_partn_erase(
__in efx_nic_t *enp,
__in uint32_t partn,
@@ -460,7 +445,8 @@ ef10_nvram_partn_write(
extern __checkReturn efx_rc_t
ef10_nvram_partn_rw_finish(
__in efx_nic_t *enp,
- __in uint32_t partn);
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp);
extern __checkReturn efx_rc_t
ef10_nvram_partn_get_version(
@@ -496,8 +482,7 @@ ef10_nvram_buffer_find_item_start(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
- __out uint32_t *startp
- );
+ __out uint32_t *startp);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_find_end(
@@ -505,8 +490,7 @@ ef10_nvram_buffer_find_end(
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
- __out uint32_t *endp
- );
+ __out uint32_t *endp);
extern __checkReturn __success(return != B_FALSE) boolean_t
ef10_nvram_buffer_find_item(
@@ -515,8 +499,7 @@ ef10_nvram_buffer_find_item(
__in size_t buffer_size,
__in uint32_t offset,
__out uint32_t *startp,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_get_item(
@@ -528,8 +511,7 @@ ef10_nvram_buffer_get_item(
__out_bcount_part(item_max_size, *lengthp)
caddr_t itemp,
__in size_t item_max_size,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_insert_item(
@@ -539,8 +521,7 @@ ef10_nvram_buffer_insert_item(
__in uint32_t offset,
__in_bcount(length) caddr_t keyp,
__in uint32_t length,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_delete_item(
@@ -549,15 +530,13 @@ ef10_nvram_buffer_delete_item(
__in size_t buffer_size,
__in uint32_t offset,
__in uint32_t length,
- __in uint32_t end
- );
+ __in uint32_t end);
extern __checkReturn efx_rc_t
ef10_nvram_buffer_finish(
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- );
+ __in size_t buffer_size);
#endif /* EFSYS_OPT_NVRAM */
@@ -659,7 +638,7 @@ ef10_tx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
__in uint16_t flags,
__in efx_evq_t *eep,
@@ -670,13 +649,13 @@ extern void
ef10_tx_qdestroy(
__in efx_txq_t *etp);
-extern __checkReturn efx_rc_t
+extern __checkReturn efx_rc_t
ef10_tx_qpost(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_buffer_t *eb,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp);
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *ebp,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
extern void
ef10_tx_qpush(
@@ -686,8 +665,8 @@ ef10_tx_qpush(
#if EFSYS_OPT_RX_PACKED_STREAM
extern void
-ef10_rx_qps_update_credits(
- __in efx_rxq_t *erp);
+ef10_rx_qpush_ps_credits(
+ __in efx_rxq_t *erp);
extern __checkReturn uint8_t *
ef10_rx_qps_packet_info(
@@ -786,7 +765,7 @@ ef10_tx_qstats_update(
typedef uint32_t efx_piobuf_handle_t;
-#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1)
+#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t)-1)
extern __checkReturn efx_rc_t
ef10_nic_pio_alloc(
@@ -945,14 +924,14 @@ ef10_rx_prefix_pktlen(
__in uint8_t *buffer,
__out uint16_t *lengthp);
-extern void
+extern void
ef10_rx_qpost(
- __in efx_rxq_t *erp,
- __in_ecount(n) efsys_dma_addr_t *addrp,
- __in size_t size,
- __in unsigned int n,
- __in unsigned int completed,
- __in unsigned int added);
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added);
extern void
ef10_rx_qpush(
@@ -974,9 +953,11 @@ ef10_rx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
+ __in uint32_t type_data,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
+ __in unsigned int flags,
__in efx_evq_t *eep,
__in efx_rxq_t *erp);
@@ -1025,7 +1006,7 @@ typedef struct ef10_filter_entry_s {
* IPv4 or IPv6 outer frame, VXLAN, GENEVE or NVGRE packet type, and unicast or
* multicast inner frames.
*/
-#define EFX_EF10_FILTER_ENCAP_FILTERS_MAX 12
+#define EFX_EF10_FILTER_ENCAP_FILTERS_MAX 12
typedef struct ef10_filter_table_s {
ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS];
@@ -1192,9 +1173,9 @@ ef10_external_port_mapping(
/* Minimum space for packet in packed stream mode */
#define EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE \
P2ROUNDUP(EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE + \
- EFX_MAC_PDU_MIN + \
- EFX_RX_PACKED_STREAM_ALIGNMENT, \
- EFX_RX_PACKED_STREAM_ALIGNMENT)
+ EFX_MAC_PDU_MIN + \
+ EFX_RX_PACKED_STREAM_ALIGNMENT, \
+ EFX_RX_PACKED_STREAM_ALIGNMENT)
/* Maximum number of credits */
#define EFX_RX_PACKED_STREAM_MAX_CREDITS 127
diff --git a/drivers/net/sfc/base/ef10_intr.c b/drivers/net/sfc/base/ef10_intr.c
index 16be3d8c..f79c44e3 100644
--- a/drivers/net/sfc/base/ef10_intr.c
+++ b/drivers/net/sfc/base/ef10_intr.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/ef10_mac.c b/drivers/net/sfc/base/ef10_mac.c
index 488633f5..db7692ee 100644
--- a/drivers/net/sfc/base/ef10_mac.c
+++ b/drivers/net/sfc/base/ef10_mac.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/ef10_mcdi.c b/drivers/net/sfc/base/ef10_mcdi.c
index 5a26bda2..1f9e573f 100644
--- a/drivers/net/sfc/base/ef10_mcdi.c
+++ b/drivers/net/sfc/base/ef10_mcdi.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -130,7 +106,7 @@ ef10_mcdi_get_timeout(
case MC_CMD_NVRAM_ERASE:
case MC_CMD_LICENSING_V3:
case MC_CMD_NVRAM_UPDATE_FINISH:
- if (encp->enc_fw_verified_nvram_update_required != B_FALSE) {
+ if (encp->enc_nvram_update_verify_result_supported != B_FALSE) {
/*
* Potentially longer running commands, which firmware
* may choose to process in a background thread.
diff --git a/drivers/net/sfc/base/ef10_nic.c b/drivers/net/sfc/base/ef10_nic.c
index 58d1b0af..eb9ec2be 100644
--- a/drivers/net/sfc/base/ef10_nic.c
+++ b/drivers/net/sfc/base/ef10_nic.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -1054,7 +1030,7 @@ ef10_get_datapath_caps(
* and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
* partition and report the result).
*/
- encp->enc_fw_verified_nvram_update_required =
+ encp->enc_nvram_update_verify_result_supported =
CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ?
B_TRUE : B_FALSE;
@@ -1076,12 +1052,20 @@ ef10_get_datapath_caps(
* Check if firmware supports VXLAN and NVGRE tunnels.
* The capability indicates Geneve protocol support as well.
*/
- if (CAP_FLAG(flags, VXLAN_NVGRE))
+ if (CAP_FLAG(flags, VXLAN_NVGRE)) {
encp->enc_tunnel_encapsulations_supported =
(1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
(1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
(1u << EFX_TUNNEL_PROTOCOL_NVGRE);
+ EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
+ encp->enc_tunnel_config_udp_entries_max =
+ EFX_TUNNEL_MAXNENTRIES;
+ } else {
+ encp->enc_tunnel_config_udp_entries_max = 0;
+ }
+
#undef CAP_FLAG
#undef CAP_FLAG2
@@ -1159,7 +1143,7 @@ fail1:
* For the Huntington family, the current port mode cannot be discovered,
* so the mapping used is instead the last match in the table to the full
* set of port modes to which the NIC can be configured. Therefore the
- * ordering of entries in the the mapping table is significant.
+ * ordering of entries in the mapping table is significant.
*/
static struct {
efx_family_t family;
diff --git a/drivers/net/sfc/base/ef10_nvram.c b/drivers/net/sfc/base/ef10_nvram.c
index 3f9d3750..1904597c 100644
--- a/drivers/net/sfc/base/ef10_nvram.c
+++ b/drivers/net/sfc/base/ef10_nvram.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -672,6 +648,7 @@ ef10_nvram_buffer_validate(
int pos;
efx_rc_t rc;
+ _NOTE(ARGUNUSED(enp, partn))
EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
if ((partn_data == NULL) || (partn_size == 0)) {
@@ -1282,6 +1259,8 @@ ef10_nvram_buf_read_tlv(
caddr_t value;
efx_rc_t rc;
+ _NOTE(ARGUNUSED(enp))
+
if ((seg_data == NULL) || (max_seg_size == 0)) {
rc = EINVAL;
goto fail1;
@@ -1937,14 +1916,37 @@ ef10_nvram_partn_read(
__in size_t size)
{
/*
- * Read requests which come in through the EFX API expect to
- * read the current, active partition.
+ * An A/B partition has two data stores (current and backup).
+ * Read requests which come in through the EFX API expect to read the
+	 * current, active store of an A/B partition. For non-A/B partitions,
+ * there is only a single store and so the mode param is ignored.
*/
return ef10_nvram_partn_read_mode(enp, partn, offset, data, size,
MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT);
}
__checkReturn efx_rc_t
+ef10_nvram_partn_read_backup(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ /*
+ * An A/B partition has two data stores (current and backup).
+ * Read the backup store of an A/B partition (i.e. the store currently
+ * being written to if the partition is locked).
+ *
+ * This is needed when comparing the existing partition content to avoid
+ * unnecessary writes, or to read back what has been written to check
+ * that the writes have succeeded.
+ */
+ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP);
+}
+
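For illustration, a hypothetical helper (not part of the library; the name and the caller-supplied 'image' buffer are assumptions) showing the read-back check the comment above describes, built on ef10_nvram_partn_read_backup():

	/* Sketch only: verify a just-written chunk against the intended image */
	static	__checkReturn		efx_rc_t
	ef10_nvram_partn_readback_check(
		__in			efx_nic_t *enp,
		__in			uint32_t partn,
		__in			unsigned int offset,
		__in_bcount(size)	caddr_t image,
		__in			size_t size)
	{
		uint8_t readback[EF10_NVRAM_CHUNK];
		efx_rc_t rc;

		/* This sketch only supports reads up to one NVRAM chunk */
		if (size > sizeof (readback)) {
			rc = EINVAL;
			goto fail1;
		}

		/* Read back from the store that was just written (the backup) */
		if ((rc = ef10_nvram_partn_read_backup(enp, partn, offset,
		    (caddr_t)readback, size)) != 0)
			goto fail2;

		/* Compare with the image the caller intended to write */
		if (memcmp(readback, image, size) != 0) {
			rc = EIO;
			goto fail3;
		}

		return (0);

	fail3:
		EFSYS_PROBE(fail3);
	fail2:
		EFSYS_PROBE(fail2);
	fail1:
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

		return (rc);
	}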
+ __checkReturn efx_rc_t
ef10_nvram_partn_erase(
__in efx_nic_t *enp,
__in uint32_t partn,
@@ -2047,15 +2049,15 @@ fail1:
ef10_nvram_partn_unlock(
__in efx_nic_t *enp,
__in uint32_t partn,
- __out_opt uint32_t *resultp)
+ __out_opt uint32_t *verify_resultp)
{
boolean_t reboot = B_FALSE;
efx_rc_t rc;
- if (resultp != NULL)
- *resultp = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
+ if (verify_resultp != NULL)
+ *verify_resultp = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
- rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, resultp);
+ rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, verify_resultp);
if (rc != 0)
goto fail1;
@@ -2106,83 +2108,48 @@ fail1:
typedef struct ef10_parttbl_entry_s {
unsigned int partn;
- unsigned int port;
+ unsigned int port_mask;
efx_nvram_type_t nvtype;
} ef10_parttbl_entry_t;
+/* Port mask values */
+#define PORT_1 (1u << 1)
+#define PORT_2 (1u << 2)
+#define PORT_3 (1u << 3)
+#define PORT_4 (1u << 4)
+#define PORT_ALL (0xffffffffu)
+
+#define PARTN_MAP_ENTRY(partn, port_mask, nvtype) \
+{ (NVRAM_PARTITION_TYPE_##partn), (PORT_##port_mask), (EFX_NVRAM_##nvtype) }
+
/* Translate EFX NVRAM types to firmware partition types */
static ef10_parttbl_entry_t hunt_parttbl[] = {
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN},
- {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM},
- {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM},
- {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM},
- {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM},
- {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
- {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
- {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 3, EFX_NVRAM_BOOTROM_CFG},
- {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 4, EFX_NVRAM_BOOTROM_CFG},
- {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG},
- {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG},
- {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG},
- {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG},
- {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
- {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
- {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA},
- {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA},
- {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
- {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
- {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP},
- {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP},
- {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
- {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE},
- {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE},
- {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE}
+ /* partn ports nvtype */
+ PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE),
+ PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN),
+ PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT0, 1, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT1, 2, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT2, 3, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT3, 4, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG),
+ PARTN_MAP_ENTRY(FPGA, ALL, FPGA),
+ PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP),
+ PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE),
};
static ef10_parttbl_entry_t medford_parttbl[] = {
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN},
- {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN},
- {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM},
- {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM},
- {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM},
- {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM},
- {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
- {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 2, EFX_NVRAM_BOOTROM_CFG},
- {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 3, EFX_NVRAM_BOOTROM_CFG},
- {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 4, EFX_NVRAM_BOOTROM_CFG},
- {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG},
- {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG},
- {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG},
- {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG},
- {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
- {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
- {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA},
- {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA},
- {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
- {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
- {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP},
- {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP},
- {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
- {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE},
- {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE},
- {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE},
- {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 1, EFX_NVRAM_UEFIROM},
- {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 2, EFX_NVRAM_UEFIROM},
- {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 3, EFX_NVRAM_UEFIROM},
- {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 4, EFX_NVRAM_UEFIROM}
+ /* partn ports nvtype */
+ PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE),
+ PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN),
+ PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG, ALL, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG),
+ PARTN_MAP_ENTRY(FPGA, ALL, FPGA),
+ PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP),
+ PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE),
+ PARTN_MAP_ENTRY(EXPANSION_UEFI, ALL, UEFIROM),
+ PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE),
};
static __checkReturn efx_rc_t
@@ -2220,6 +2187,7 @@ ef10_nvram_type_to_partn(
size_t parttbl_rows = 0;
unsigned int i;
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
EFSYS_ASSERT(partnp != NULL);
@@ -2227,8 +2195,8 @@ ef10_nvram_type_to_partn(
for (i = 0; i < parttbl_rows; i++) {
ef10_parttbl_entry_t *entry = &parttbl[i];
- if (entry->nvtype == type &&
- entry->port == emip->emi_port) {
+ if ((entry->nvtype == type) &&
+ (entry->port_mask & (1u << emip->emi_port))) {
*partnp = entry->partn;
return (0);
}
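As a worked example (values hypothetical): on MCDI port 2, (1u << emip->emi_port) is 0x4, so an entry with port_mask PORT_2 (0x4) or PORT_ALL (0xffffffff) matches, while a PORT_1 entry (0x2) does not:

	/* Illustrative checks only; emi_port numbering starts at 1 */
	unsigned int emi_port = 2;

	EFSYS_ASSERT((PORT_ALL & (1u << emi_port)) != 0);	/* match */
	EFSYS_ASSERT((PORT_2 & (1u << emi_port)) != 0);		/* match */
	EFSYS_ASSERT((PORT_1 & (1u << emi_port)) == 0);		/* no match */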
@@ -2257,8 +2225,8 @@ ef10_nvram_partn_to_type(
for (i = 0; i < parttbl_rows; i++) {
ef10_parttbl_entry_t *entry = &parttbl[i];
- if (entry->partn == partn &&
- entry->port == emip->emi_port) {
+ if ((entry->partn == partn) &&
+ (entry->port_mask & (1u << emip->emi_port))) {
*typep = entry->nvtype;
return (0);
}
@@ -2346,16 +2314,27 @@ ef10_nvram_partn_rw_start(
__in uint32_t partn,
__out size_t *chunk_sizep)
{
+ uint32_t write_size = 0;
efx_rc_t rc;
- if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ NULL, &write_size)) != 0)
goto fail1;
- if (chunk_sizep != NULL)
- *chunk_sizep = EF10_NVRAM_CHUNK;
+ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ goto fail2;
+
+ if (chunk_sizep != NULL) {
+ if (write_size == 0)
+ *chunk_sizep = EF10_NVRAM_CHUNK;
+ else
+ *chunk_sizep = write_size;
+ }
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -2365,11 +2344,12 @@ fail1:
__checkReturn efx_rc_t
ef10_nvram_partn_rw_finish(
__in efx_nic_t *enp,
- __in uint32_t partn)
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp)
{
efx_rc_t rc;
- if ((rc = ef10_nvram_partn_unlock(enp, partn, NULL)) != 0)
+ if ((rc = ef10_nvram_partn_unlock(enp, partn, verify_resultp)) != 0)
goto fail1;
return (0);
diff --git a/drivers/net/sfc/base/ef10_phy.c b/drivers/net/sfc/base/ef10_phy.c
index 81309f29..aa8d6a2b 100644
--- a/drivers/net/sfc/base/ef10_phy.c
+++ b/drivers/net/sfc/base/ef10_phy.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -280,7 +256,9 @@ ef10_phy_reconfigure(
uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
MC_CMD_SET_LINK_OUT_LEN)];
uint32_t cap_mask;
+#if EFSYS_OPT_PHY_LED_CONTROL
unsigned int led_mode;
+#endif
unsigned int speed;
boolean_t supported;
efx_rc_t rc;
diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c
index 849f674c..2bb6705d 100644
--- a/drivers/net/sfc/base/ef10_rx.c
+++ b/drivers/net/sfc/base/ef10_rx.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -38,33 +14,54 @@
static __checkReturn efx_rc_t
efx_mcdi_init_rxq(
__in efx_nic_t *enp,
- __in uint32_t size,
+ __in uint32_t ndescs,
__in uint32_t target_evq,
__in uint32_t label,
__in uint32_t instance,
__in efsys_mem_t *esmp,
__in boolean_t disable_scatter,
+ __in boolean_t want_inner_classes,
__in uint32_t ps_bufsize)
{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN,
MC_CMD_INIT_RXQ_EXT_OUT_LEN)];
- int npages = EFX_RXQ_NBUFS(size);
+ int npages = EFX_RXQ_NBUFS(ndescs);
int i;
efx_qword_t *dma_addr;
uint64_t addr;
efx_rc_t rc;
uint32_t dma_mode;
+ boolean_t want_outer_classes;
- /* If this changes, then the payload size might need to change. */
- EFSYS_ASSERT3U(MC_CMD_INIT_RXQ_OUT_LEN, ==, 0);
- EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS);
+ EFSYS_ASSERT3U(ndescs, <=, EFX_RXQ_MAXNDESCS);
if (ps_bufsize > 0)
dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
else
dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
+ if (encp->enc_tunnel_encapsulations_supported != 0 &&
+ !want_inner_classes) {
+ /*
+ * WANT_OUTER_CLASSES can only be specified on hardware which
+		 * supports tunnel encapsulation offloads, even though it merely
+		 * requests the behaviour the hardware gives by default.
+ *
+ * Also, on hardware which does support such offloads, older
+ * firmware rejects the flag if the offloads are not supported
+ * by the current firmware variant, which means this may fail if
+ * the capabilities are not updated when the firmware variant
+ * changes. This is not an issue on newer firmware, as it was
+ * changed in bug 69842 (v6.4.2.1007) to permit this flag to be
+ * specified on all firmware variants.
+ */
+ want_outer_classes = B_TRUE;
+ } else {
+ want_outer_classes = B_FALSE;
+ }
+
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_RXQ;
req.emr_in_buf = payload;
@@ -72,11 +69,11 @@ efx_mcdi_init_rxq(
req.emr_out_buf = payload;
req.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN;
- MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, size);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
- MCDI_IN_POPULATE_DWORD_8(req, INIT_RXQ_EXT_IN_FLAGS,
+ MCDI_IN_POPULATE_DWORD_9(req, INIT_RXQ_EXT_IN_FLAGS,
INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
@@ -85,7 +82,8 @@ efx_mcdi_init_rxq(
INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
INIT_RXQ_EXT_IN_DMA_MODE,
dma_mode,
- INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize);
+ INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize,
+ INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes);
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
@@ -197,7 +195,13 @@ efx_mcdi_rss_context_alloc(
MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
EVB_PORT_ID_ASSIGNED);
MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type);
- /* NUM_QUEUES is only used to validate indirection table offsets */
+
+ /*
+ * For exclusive contexts, NUM_QUEUES is only used to validate
+ * indirection table offsets.
+ * For shared contexts, the provided context will spread traffic over
+ * NUM_QUEUES many queues.
+ */
MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
efx_mcdi_execute(enp, &req);
@@ -707,26 +711,45 @@ ef10_rx_prefix_hash(
}
#endif /* EFSYS_OPT_RX_SCALE */
- void
+#if EFSYS_OPT_RX_PACKED_STREAM
+/*
+ * Fake length for RXQ descriptors in packed stream mode
+ * to make hardware happy
+ */
+#define EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE 32
+#endif
+
+ void
ef10_rx_qpost(
- __in efx_rxq_t *erp,
- __in_ecount(n) efsys_dma_addr_t *addrp,
- __in size_t size,
- __in unsigned int n,
- __in unsigned int completed,
- __in unsigned int added)
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added)
{
efx_qword_t qword;
unsigned int i;
unsigned int offset;
unsigned int id;
+ _NOTE(ARGUNUSED(completed))
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+	 * The real size of the buffer does not fit into ESF_DZ_RX_KER_BYTE_CNT
+	 * and is equal to 0 after applying the mask. Hardware does not like it.
+ */
+ if (erp->er_ev_qstate->eers_rx_packed_stream)
+ size = EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE;
+#endif
+
/* The client driver must not overfill the queue */
- EFSYS_ASSERT3U(added - completed + n, <=,
+ EFSYS_ASSERT3U(added - completed + ndescs, <=,
EFX_RXQ_LIMIT(erp->er_mask + 1));
id = added & (erp->er_mask);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < ndescs; i++) {
EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
unsigned int, id, efsys_dma_addr_t, addrp[i],
size_t, size);
@@ -779,31 +802,47 @@ ef10_rx_qpush(
#if EFSYS_OPT_RX_PACKED_STREAM
void
-ef10_rx_qps_update_credits(
- __in efx_rxq_t *erp)
+ef10_rx_qpush_ps_credits(
+ __in efx_rxq_t *erp)
{
efx_nic_t *enp = erp->er_enp;
efx_dword_t dword;
- efx_evq_rxq_state_t *rxq_state =
- &erp->er_eep->ee_rxq_state[erp->er_label];
+ efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate;
+ uint32_t credits;
EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
if (rxq_state->eers_rx_packed_stream_credits == 0)
return;
+ /*
+ * It is a bug if we think that FW has utilized more
+	 * credits than it is allowed to have (the maximum). However,
+	 * make sure that we do not credit more than the maximum anyway.
+ */
+ credits = MIN(rxq_state->eers_rx_packed_stream_credits,
+ EFX_RX_PACKED_STREAM_MAX_CREDITS);
EFX_POPULATE_DWORD_3(dword,
ERF_DZ_RX_DESC_MAGIC_DOORBELL, 1,
ERF_DZ_RX_DESC_MAGIC_CMD,
ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS,
- ERF_DZ_RX_DESC_MAGIC_DATA,
- rxq_state->eers_rx_packed_stream_credits);
+ ERF_DZ_RX_DESC_MAGIC_DATA, credits);
EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
erp->er_index, &dword, B_FALSE);
rxq_state->eers_rx_packed_stream_credits = 0;
}
+/*
+ * In accordance with SF-112241-TC the received data has the following layout:
+ *  - 8 byte pseudo-header which consists of:
+ * - 4 byte little-endian timestamp
+ * - 2 byte little-endian captured length in bytes
+ * - 2 byte little-endian original packet length in bytes
+ * - captured packet bytes
+ *  - optional padding to align to a 64 byte boundary
+ *  - 64 bytes of scratch space for the host software
+ */
__checkReturn uint8_t *
ef10_rx_qps_packet_info(
__in efx_rxq_t *erp,
@@ -817,8 +856,7 @@ ef10_rx_qps_packet_info(
uint16_t buf_len;
uint8_t *pkt_start;
efx_qword_t *qwordp;
- efx_evq_rxq_state_t *rxq_state =
- &erp->er_eep->ee_rxq_state[erp->er_label];
+ efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate;
EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
@@ -839,11 +877,8 @@ ef10_rx_qps_packet_info(
EFSYS_ASSERT3U(current_offset + *lengthp, <, *next_offsetp);
if ((*next_offsetp ^ current_offset) &
- EFX_RX_PACKED_STREAM_MEM_PER_CREDIT) {
- if (rxq_state->eers_rx_packed_stream_credits <
- EFX_RX_PACKED_STREAM_MAX_CREDITS)
- rxq_state->eers_rx_packed_stream_credits++;
- }
+ EFX_RX_PACKED_STREAM_MEM_PER_CREDIT)
+ rxq_state->eers_rx_packed_stream_credits++;
return (pkt_start);
}
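As an illustration of the SF-112241-TC pseudo-header layout documented above, a hypothetical helper (not part of the library) could unpack the little-endian fields like this:

	/* Hypothetical sketch: unpack the 8 byte packed stream pseudo-header */
	static void
	ef10_rx_qps_pseudo_hdr_parse(
		__in		const uint8_t *hdr,
		__out		uint32_t *tstampp,	/* timestamp */
		__out		uint16_t *cap_lenp,	/* captured length */
		__out		uint16_t *orig_lenp)	/* original packet length */
	{
		*tstampp = (uint32_t)hdr[0] | ((uint32_t)hdr[1] << 8) |
		    ((uint32_t)hdr[2] << 16) | ((uint32_t)hdr[3] << 24);
		*cap_lenp = (uint16_t)(hdr[4] | ((uint16_t)hdr[5] << 8));
		*orig_lenp = (uint16_t)(hdr[6] | ((uint16_t)hdr[7] << 8));
	}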
@@ -891,18 +926,21 @@ ef10_rx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
+ __in uint32_t type_data,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
+ __in unsigned int flags,
__in efx_evq_t *eep,
__in efx_rxq_t *erp)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_rc_t rc;
boolean_t disable_scatter;
+ boolean_t want_inner_classes;
unsigned int ps_buf_size;
- _NOTE(ARGUNUSED(id, erp))
+ _NOTE(ARGUNUSED(id, erp, type_data))
EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
@@ -911,7 +949,8 @@ ef10_rx_qcreate(
EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
- if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
+ if (!ISP2(ndescs) ||
+ (ndescs < EFX_RXQ_MINNDESCS) || (ndescs > EFX_RXQ_MAXNDESCS)) {
rc = EINVAL;
goto fail1;
}
@@ -922,29 +961,35 @@ ef10_rx_qcreate(
switch (type) {
case EFX_RXQ_TYPE_DEFAULT:
- case EFX_RXQ_TYPE_SCATTER:
ps_buf_size = 0;
break;
#if EFSYS_OPT_RX_PACKED_STREAM
- case EFX_RXQ_TYPE_PACKED_STREAM_1M:
- ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
- break;
- case EFX_RXQ_TYPE_PACKED_STREAM_512K:
- ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K;
- break;
- case EFX_RXQ_TYPE_PACKED_STREAM_256K:
- ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K;
- break;
- case EFX_RXQ_TYPE_PACKED_STREAM_128K:
- ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K;
- break;
- case EFX_RXQ_TYPE_PACKED_STREAM_64K:
- ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K;
+ case EFX_RXQ_TYPE_PACKED_STREAM:
+ switch (type_data) {
+ case EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
+ break;
+ case EFX_RXQ_PACKED_STREAM_BUF_SIZE_512K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K;
+ break;
+ case EFX_RXQ_PACKED_STREAM_BUF_SIZE_256K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K;
+ break;
+ case EFX_RXQ_PACKED_STREAM_BUF_SIZE_128K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K;
+ break;
+ case EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail3;
+ }
break;
#endif /* EFSYS_OPT_RX_PACKED_STREAM */
default:
rc = ENOTSUP;
- goto fail3;
+ goto fail4;
}
#if EFSYS_OPT_RX_PACKED_STREAM
@@ -952,13 +997,13 @@ ef10_rx_qcreate(
/* Check if datapath firmware supports packed stream mode */
if (encp->enc_rx_packed_stream_supported == B_FALSE) {
rc = ENOTSUP;
- goto fail4;
+ goto fail5;
}
/* Check if packed stream allows configurable buffer sizes */
- if ((type != EFX_RXQ_TYPE_PACKED_STREAM_1M) &&
+ if ((ps_buf_size != MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M) &&
(encp->enc_rx_var_packed_stream_supported == B_FALSE)) {
rc = ENOTSUP;
- goto fail5;
+ goto fail6;
}
}
#else /* EFSYS_OPT_RX_PACKED_STREAM */
@@ -966,32 +1011,44 @@ ef10_rx_qcreate(
#endif /* EFSYS_OPT_RX_PACKED_STREAM */
/* Scatter can only be disabled if the firmware supports doing so */
- if (type == EFX_RXQ_TYPE_SCATTER)
+ if (flags & EFX_RXQ_FLAG_SCATTER)
disable_scatter = B_FALSE;
else
disable_scatter = encp->enc_rx_disable_scatter_supported;
- if ((rc = efx_mcdi_init_rxq(enp, n, eep->ee_index, label, index,
- esmp, disable_scatter, ps_buf_size)) != 0)
- goto fail6;
+ if (flags & EFX_RXQ_FLAG_INNER_CLASSES)
+ want_inner_classes = B_TRUE;
+ else
+ want_inner_classes = B_FALSE;
+
+ if ((rc = efx_mcdi_init_rxq(enp, ndescs, eep->ee_index, label, index,
+ esmp, disable_scatter, want_inner_classes,
+ ps_buf_size)) != 0)
+ goto fail7;
erp->er_eep = eep;
erp->er_label = label;
- ef10_ev_rxlabel_init(eep, erp, label, ps_buf_size != 0);
+ ef10_ev_rxlabel_init(eep, erp, label, type);
+
+ erp->er_ev_qstate = &erp->er_eep->ee_rxq_state[label];
return (0);
+fail7:
+ EFSYS_PROBE(fail7);
+#if EFSYS_OPT_RX_PACKED_STREAM
fail6:
EFSYS_PROBE(fail6);
-#if EFSYS_OPT_RX_PACKED_STREAM
fail5:
EFSYS_PROBE(fail5);
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
fail4:
EFSYS_PROBE(fail4);
-#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+#if EFSYS_OPT_RX_PACKED_STREAM
fail3:
EFSYS_PROBE(fail3);
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
fail2:
EFSYS_PROBE(fail2);
fail1:
diff --git a/drivers/net/sfc/base/ef10_tlv_layout.h b/drivers/net/sfc/base/ef10_tlv_layout.h
index 7d099b81..2473a66a 100644
--- a/drivers/net/sfc/base/ef10_tlv_layout.h
+++ b/drivers/net/sfc/base/ef10_tlv_layout.h
@@ -1,13 +1,8 @@
-/**************************************************************************\
-*//*! \file
-** <L5_PRIVATE L5_SOURCE>
-** \author mjs
-** \brief TLV item layouts for EF10 static and dynamic config in NVRAM
-** \date 2012/11/20
-** \cop (c) Solarflare Communications Inc.
-** </L5_PRIVATE>
-*//*
-\**************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
/* These structures define the layouts for the TLV items stored in static and
* dynamic configuration partitions in NVRAM for EF10 (Huntington etc.).
diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c
index 211d2655..69c75700 100644
--- a/drivers/net/sfc/base/ef10_tx.c
+++ b/drivers/net/sfc/base/ef10_tx.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -47,7 +23,7 @@
static __checkReturn efx_rc_t
efx_mcdi_init_txq(
__in efx_nic_t *enp,
- __in uint32_t size,
+ __in uint32_t ndescs,
__in uint32_t target_evq,
__in uint32_t label,
__in uint32_t instance,
@@ -66,7 +42,7 @@ efx_mcdi_init_txq(
EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
- npages = EFX_TXQ_NBUFS(size);
+ npages = EFX_TXQ_NBUFS(ndescs);
if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
rc = EINVAL;
goto fail1;
@@ -79,17 +55,21 @@ efx_mcdi_init_txq(
req.emr_out_buf = payload;
req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
- MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs);
MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
- MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
+ MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS,
INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
(flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
(flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
+ INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN,
+ (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0,
+ INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN,
+ (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
INIT_TXQ_IN_CRC_MODE, 0,
@@ -187,21 +167,30 @@ ef10_tx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
__in uint16_t flags,
__in efx_evq_t *eep,
__in efx_txq_t *etp,
__out unsigned int *addedp)
{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint16_t inner_csum;
efx_qword_t desc;
efx_rc_t rc;
_NOTE(ARGUNUSED(id))
- if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
- esmp)) != 0)
+ inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;
+ if (((flags & inner_csum) != 0) &&
+ (encp->enc_tunnel_encapsulations_supported == 0)) {
+ rc = EINVAL;
goto fail1;
+ }
+
+ if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index,
+ flags, esmp)) != 0)
+ goto fail2;
/*
* A previous user of this TX queue may have written a descriptor to the
@@ -212,19 +201,25 @@ ef10_tx_qcreate(
* a no-op TX option descriptor. See bug29981 for details.
*/
*addedp = 1;
- EFX_POPULATE_QWORD_4(desc,
+ EFX_POPULATE_QWORD_6(desc,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
(flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
ESF_DZ_TX_OPTION_IP_CSUM,
- (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
+ ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
+ (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
+ ESF_DZ_TX_OPTION_INNER_IP_CSUM,
+ (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
ef10_tx_qpush(etp, *addedp, 0);
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -414,24 +409,24 @@ fail1:
return (rc);
}
- __checkReturn efx_rc_t
+ __checkReturn efx_rc_t
ef10_tx_qpost(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_buffer_t *eb,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp)
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *eb,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
{
unsigned int added = *addedp;
unsigned int i;
efx_rc_t rc;
- if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
rc = ENOSPC;
goto fail1;
}
- for (i = 0; i < n; i++) {
+ for (i = 0; i < ndescs; i++) {
efx_buffer_t *ebp = &eb[i];
efsys_dma_addr_t addr = ebp->eb_addr;
size_t size = ebp->eb_size;
@@ -473,9 +468,9 @@ fail1:
}
/*
- * This improves performance by pushing a TX descriptor at the same time as the
- * doorbell. The descriptor must be added to the TXQ, so that can be used if the
- * hardware decides not to use the pushed descriptor.
+ * This improves performance by, when possible, pushing a TX descriptor at the
+ * same time as the doorbell. The descriptor must be added to the TXQ, so that
+ * it can be used if the hardware decides not to use the pushed descriptor.
*/
void
ef10_tx_qpush(
@@ -495,36 +490,66 @@ ef10_tx_qpush(
offset = id * sizeof (efx_qword_t);
EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
- EFX_POPULATE_OWORD_3(oword,
- ERF_DZ_TX_DESC_WPTR, wptr,
- ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
- ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
-
- /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
- EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);
- EFSYS_PIO_WRITE_BARRIER();
- EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,
- &oword);
+
+ /*
+ * Bug 65776: TSO option descriptors cannot be pushed if pacer bypass is
+ * enabled on the event queue this transmit queue is attached to.
+ *
+ * To ensure the code is safe, it is easiest to simply test the type of
+ * the descriptor to push, and only push it is if it not a TSO option
+ * descriptor.
+ */
+ if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) ||
+ (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) !=
+ ESE_DZ_TX_OPTION_DESC_TSO)) {
+ /* Push the descriptor and update the wptr. */
+ EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr,
+ ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+ ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+ /* Ensure ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
+ wptr, id);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
+ etp->et_index, &oword);
+ } else {
+ efx_dword_t dword;
+
+ /*
+ * Only update the wptr. This is signalled to the hardware by
+ * only writing one DWORD of the doorbell register.
+ */
+ EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
+ dword = oword.eo_dword[2];
+
+ /* Ensure ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
+ wptr, id);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
+ etp->et_index, &dword, B_FALSE);
+ }
}
- __checkReturn efx_rc_t
+ __checkReturn efx_rc_t
ef10_tx_qdesc_post(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_desc_t *ed,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp)
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_desc_t *ed,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
{
unsigned int added = *addedp;
unsigned int i;
efx_rc_t rc;
- if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
rc = ENOSPC;
goto fail1;
}
- for (i = 0; i < n; i++) {
+ for (i = 0; i < ndescs; i++) {
efx_desc_t *edp = &ed[i];
unsigned int id;
size_t offset;
@@ -536,7 +561,7 @@ ef10_tx_qdesc_post(
}
EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
- unsigned int, added, unsigned int, n);
+ unsigned int, added, unsigned int, ndescs);
EFX_TX_QSTAT_INCR(etp, TX_POST);
@@ -557,6 +582,8 @@ ef10_tx_qdesc_dma_create(
__in boolean_t eop,
__out efx_desc_t *edp)
{
+ _NOTE(ARGUNUSED(etp))
+
/* No limitations on boundary crossing */
EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
@@ -580,6 +607,8 @@ ef10_tx_qdesc_tso_create(
__in uint8_t tcp_flags,
__out efx_desc_t *edp)
{
+ _NOTE(ARGUNUSED(etp))
+
EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
uint16_t, ipv4_id, uint32_t, tcp_seq,
uint8_t, tcp_flags);
@@ -602,6 +631,8 @@ ef10_tx_qdesc_tso2_create(
__out_ecount(count) efx_desc_t *edp,
__in int count)
{
+ _NOTE(ARGUNUSED(etp, count))
+
EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
uint16_t, ipv4_id, uint32_t, tcp_seq,
uint16_t, tcp_mss);
@@ -631,6 +662,8 @@ ef10_tx_qdesc_vlantci_create(
__in uint16_t tci,
__out efx_desc_t *edp)
{
+ _NOTE(ARGUNUSED(etp))
+
EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
uint16_t, tci);
diff --git a/drivers/net/sfc/base/ef10_vpd.c b/drivers/net/sfc/base/ef10_vpd.c
index 71123a90..ad522e82 100644
--- a/drivers/net/sfc/base/ef10_vpd.c
+++ b/drivers/net/sfc/base/ef10_vpd.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/efx.h b/drivers/net/sfc/base/efx.h
index 57fba052..fe996e7c 100644
--- a/drivers/net/sfc/base/efx.h
+++ b/drivers/net/sfc/base/efx.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2006-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2006-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_EFX_H
@@ -40,13 +16,16 @@ extern "C" {
#endif
#define EFX_STATIC_ASSERT(_cond) \
- ((void)sizeof(char[(_cond) ? 1 : -1]))
+ ((void)sizeof (char[(_cond) ? 1 : -1]))
#define EFX_ARRAY_SIZE(_array) \
- (sizeof(_array) / sizeof((_array)[0]))
+ (sizeof (_array) / sizeof ((_array)[0]))
#define EFX_FIELD_OFFSET(_type, _field) \
- ((size_t) &(((_type *)0)->_field))
+ ((size_t)&(((_type *)0)->_field))
+
+/* The macro expands the divisor (_d) twice */
+#define EFX_DIV_ROUND_UP(_n, _d) (((_n) + (_d) - 1) / (_d))
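For instance, EFX_DIV_ROUND_UP(10, 4) expands to ((10 + 4 - 1) / 4), i.e. 3. Because _d appears twice in the expansion, the divisor argument must be free of side effects:

	/*
	 * Example (illustrative): EFX_DIV_ROUND_UP(len, *dp++) would
	 * increment dp twice; pass a plain expression such as
	 * EFX_DIV_ROUND_UP(len, 4) instead.
	 */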
/* Return codes */
@@ -555,7 +534,7 @@ efx_mac_stats_get_mask(
#define EFX_MAC_STAT_SUPPORTED(_mask, _stat) \
((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \
- (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1))))
+ (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1))))
#define EFX_MAC_STATS_SIZE 0x400
@@ -624,7 +603,7 @@ efx_mon_init(
#define EFX_MON_STATS_PAGE_SIZE 0x100
#define EFX_MON_MASK_ELEMENT_SIZE 32
-/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 5d4ee5185e419abe */
+/* START MKCONFIG GENERATED MonitorHeaderStatsBlock aa0233c80156308e */
typedef enum efx_mon_stat_e {
EFX_MON_STAT_2_5V,
EFX_MON_STAT_VCCP1,
@@ -703,6 +682,8 @@ typedef enum efx_mon_stat_e {
EFX_MON_STAT_CONTROLLER_TDIODE_TEMP,
EFX_MON_STAT_BOARD_FRONT_TEMP,
EFX_MON_STAT_BOARD_BACK_TEMP,
+ EFX_MON_STAT_I1V8,
+ EFX_MON_STAT_I2V5,
EFX_MON_NSTATS
} efx_mon_stat_t;
@@ -903,7 +884,8 @@ typedef enum efx_phy_media_type_e {
EFX_PHY_MEDIA_NTYPES
} efx_phy_media_type_t;
-/* Get the type of medium currently used. If the board has ports for
+/*
+ * Get the type of medium currently used. If the board has ports for
* modules, a module is present, and we recognise the media type of
* the module, then this will be the media type of the module.
* Otherwise it will be the media type of the port.
@@ -913,13 +895,13 @@ efx_phy_media_type_get(
__in efx_nic_t *enp,
__out efx_phy_media_type_t *typep);
-extern efx_rc_t
+extern __checkReturn efx_rc_t
efx_phy_module_get_info(
- __in efx_nic_t *enp,
- __in uint8_t dev_addr,
- __in uint8_t offset,
- __in uint8_t len,
- __out_bcount(len) uint8_t *data);
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
#if EFSYS_OPT_PHY_STATS
@@ -1004,7 +986,7 @@ typedef enum efx_bist_type_e {
EFX_BIST_TYPE_PHY_CABLE_SHORT,
EFX_BIST_TYPE_PHY_CABLE_LONG,
EFX_BIST_TYPE_MC_MEM, /* Test the MC DMEM and IMEM */
- EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite cpus*/
+ EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite cpus */
EFX_BIST_TYPE_REG, /* Test the register memories */
EFX_BIST_TYPE_NTYPES,
} efx_bist_type_t;
@@ -1035,8 +1017,10 @@ typedef enum efx_bist_value_e {
EFX_BIST_PHY_CABLE_STATUS_C,
EFX_BIST_PHY_CABLE_STATUS_D,
EFX_BIST_FAULT_CODE,
- /* Memory BIST specific values. These match to the MC_CMD_BIST_POLL
- * response. */
+ /*
+	 * Memory BIST specific values. These match the MC_CMD_BIST_POLL
+ * response.
+ */
EFX_BIST_MEM_TEST,
EFX_BIST_MEM_ADDR,
EFX_BIST_MEM_BUS,
@@ -1197,6 +1181,11 @@ typedef struct efx_nic_cfg_s {
boolean_t enc_pm_and_rxdp_counters;
boolean_t enc_mac_stats_40g_tx_size_bins;
uint32_t enc_tunnel_encapsulations_supported;
+ /*
+ * NIC global maximum for unique UDP tunnel ports shared by all
+ * functions.
+ */
+ uint32_t enc_tunnel_config_udp_entries_max;
/* External port identifier */
uint8_t enc_external_port;
uint32_t enc_mcdi_max_payload_length;
@@ -1206,7 +1195,7 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_required_pcie_bandwidth_mbps;
uint32_t enc_max_pcie_link_gen;
/* Firmware verifies integrity of NVRAM updates */
- uint32_t enc_fw_verified_nvram_update_required;
+ uint32_t enc_nvram_update_verify_result_supported;
} efx_nic_cfg_t;
#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff)
@@ -1381,6 +1370,7 @@ typedef enum efx_nvram_type_e {
EFX_NVRAM_DYNAMIC_CFG,
EFX_NVRAM_LICENSE,
EFX_NVRAM_UEFIROM,
+ EFX_NVRAM_MUM_FIRMWARE,
EFX_NVRAM_NTYPES,
} efx_nvram_type_t;
@@ -1411,7 +1401,8 @@ efx_nvram_rw_start(
extern __checkReturn efx_rc_t
efx_nvram_rw_finish(
__in efx_nic_t *enp,
- __in efx_nvram_type_t type);
+ __in efx_nvram_type_t type,
+ __out_opt uint32_t *verify_resultp);
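A minimal usage sketch (error handling and the surrounding declarations are the caller's) showing how the optional verify result might be captured:

	/* Sketch: finish an NVRAM update, capturing the verify verdict */
	uint32_t verify_result = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;

	if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_DYNAMIC_CFG,
	    &verify_result)) != 0)
		goto fail1;

	/* verify_result now holds the firmware's MC_CMD_NVRAM_VERIFY_RC_* code */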
extern __checkReturn efx_rc_t
efx_nvram_get_version(
@@ -1429,6 +1420,14 @@ efx_nvram_read_chunk(
__in size_t size);
extern __checkReturn efx_rc_t
+efx_nvram_read_backup(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
efx_nvram_set_version(
__in efx_nic_t *enp,
__in efx_nvram_type_t type,
@@ -1488,13 +1487,13 @@ efx_bootcfg_copy_sector(
extern efx_rc_t
efx_bootcfg_read(
__in efx_nic_t *enp,
- __out_bcount(size) caddr_t data,
+ __out_bcount(size) uint8_t *data,
__in size_t size);
extern efx_rc_t
efx_bootcfg_write(
__in efx_nic_t *enp,
- __in_bcount(size) caddr_t data,
+ __in_bcount(size) uint8_t *data,
__in size_t size);
#endif /* EFSYS_OPT_BOOTCFG */
@@ -1621,7 +1620,7 @@ efx_ev_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
__in uint32_t us,
__in uint32_t flags,
@@ -1778,8 +1777,7 @@ typedef __checkReturn boolean_t
typedef __checkReturn boolean_t
(*efx_mac_stats_ev_t)(
__in_opt void *arg,
- __in uint32_t generation
- );
+ __in uint32_t generation);
#endif /* EFSYS_OPT_MAC_STATS */
@@ -1985,15 +1983,26 @@ efx_pseudo_hdr_pkt_length_get(
typedef enum efx_rxq_type_e {
EFX_RXQ_TYPE_DEFAULT,
- EFX_RXQ_TYPE_SCATTER,
- EFX_RXQ_TYPE_PACKED_STREAM_1M,
- EFX_RXQ_TYPE_PACKED_STREAM_512K,
- EFX_RXQ_TYPE_PACKED_STREAM_256K,
- EFX_RXQ_TYPE_PACKED_STREAM_128K,
- EFX_RXQ_TYPE_PACKED_STREAM_64K,
+ EFX_RXQ_TYPE_PACKED_STREAM,
EFX_RXQ_NTYPES
} efx_rxq_type_t;
+/*
+ * Dummy flag to be used instead of 0 to make it clear that the argument
+ * is receive queue flags.
+ */
+#define EFX_RXQ_FLAG_NONE 0x0
+#define EFX_RXQ_FLAG_SCATTER 0x1
+/*
+ * If tunnels are supported and the Rx event can provide information about
+ * either outer or inner packet classes (e.g. SFN8xxx adapters running the
+ * full-feature firmware variant), outer classes are requested by default.
+ * However, if the driver supports tunnels, this flag requests inner
+ * classes, which are required to interpret inner Rx checksum offload
+ * results.
+ */
+#define EFX_RXQ_FLAG_INNER_CLASSES 0x2
+
extern __checkReturn efx_rc_t
efx_rx_qcreate(
__in efx_nic_t *enp,
@@ -2001,11 +2010,33 @@ efx_rx_qcreate(
__in unsigned int label,
__in efx_rxq_type_t type,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
+ __in unsigned int flags,
__in efx_evq_t *eep,
__deref_out efx_rxq_t **erpp);
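Scatter is now requested through the new flags argument rather than a dedicated queue type. A sketch of creating a default queue with scatter and inner-class reporting enabled (index, label, esmp, ndescs, id and eep are assumed to be set up by the caller as before; the index parameter elided between the hunks above is assumed unchanged):

    efx_rxq_t *erp;
    efx_rc_t rc;

    rc = efx_rx_qcreate(enp, index, label, EFX_RXQ_TYPE_DEFAULT, esmp,
        ndescs, id, EFX_RXQ_FLAG_SCATTER | EFX_RXQ_FLAG_INNER_CLASSES,
        eep, &erp);
    if (rc != 0)
        return (rc);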
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M (1U * 1024 * 1024)
+#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_512K (512U * 1024)
+#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_256K (256U * 1024)
+#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_128K (128U * 1024)
+#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K (64U * 1024)
+
+extern __checkReturn efx_rc_t
+efx_rx_qcreate_packed_stream(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in uint32_t ps_buf_size,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp);
+
+#endif
+
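Correspondingly, packed stream queues are now created through the dedicated helper, with the buffer size passed explicitly instead of being encoded in the queue type. Under the same assumptions as the sketch above:

    #if EFSYS_OPT_RX_PACKED_STREAM
    rc = efx_rx_qcreate_packed_stream(enp, index, label,
        EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K, esmp, ndescs, eep, &erp);
    #endif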
typedef struct efx_buffer_s {
efsys_dma_addr_t eb_addr;
size_t eb_size;
@@ -2016,14 +2047,14 @@ typedef struct efx_desc_s {
efx_qword_t ed_eq;
} efx_desc_t;
-extern void
+extern void
efx_rx_qpost(
- __in efx_rxq_t *erp,
- __in_ecount(n) efsys_dma_addr_t *addrp,
- __in size_t size,
- __in unsigned int n,
- __in unsigned int completed,
- __in unsigned int added);
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added);
extern void
efx_rx_qpush(
@@ -2033,14 +2064,8 @@ efx_rx_qpush(
#if EFSYS_OPT_RX_PACKED_STREAM
-/*
- * Fake length for RXQ descriptors in packed stream mode
- * to make hardware happy
- */
-#define EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE 32
-
extern void
-efx_rx_qps_update_credits(
+efx_rx_qpush_ps_credits(
__in efx_rxq_t *erp);
extern __checkReturn uint8_t *
@@ -2096,13 +2121,14 @@ efx_tx_fini(
#define EFX_TXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
#define EFX_TXQ_NBUFS(_ndescs) (EFX_TXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
#define EFX_TXQ_LIMIT(_ndescs) ((_ndescs) - 16)
-#define EFX_TXQ_DC_NDESCS(_dcsize) (8 << _dcsize)
#define EFX_TXQ_MAX_BUFS 8 /* Maximum independent of EFX_BUG35388_WORKAROUND. */
-#define EFX_TXQ_CKSUM_IPV4 0x0001
-#define EFX_TXQ_CKSUM_TCPUDP 0x0002
-#define EFX_TXQ_FATSOV2 0x0004
+#define EFX_TXQ_CKSUM_IPV4 0x0001
+#define EFX_TXQ_CKSUM_TCPUDP 0x0002
+#define EFX_TXQ_FATSOV2 0x0004
+#define EFX_TXQ_CKSUM_INNER_IPV4 0x0008
+#define EFX_TXQ_CKSUM_INNER_TCPUDP 0x0010
extern __checkReturn efx_rc_t
efx_tx_qcreate(
@@ -2117,13 +2143,13 @@ efx_tx_qcreate(
__deref_out efx_txq_t **etpp,
__out unsigned int *addedp);
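The two new inner-checksum flags request offload for the encapsulated frame on tunnel-capable firmware; they are ORed with the outer flags and passed in efx_tx_qcreate()'s flags argument (the full prototype is elided by the hunk above, so the exact parameter position and width are not shown here — uint16_t is an assumption). A sketch of the value a tunnel-aware datapath might request:

    /* Outer and inner IPv4/TCP/UDP checksum offload for tunnel traffic */
    uint16_t flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP |
        EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;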
-extern __checkReturn efx_rc_t
+extern __checkReturn efx_rc_t
efx_tx_qpost(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_buffer_t *eb,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp);
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *eb,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
extern __checkReturn efx_rc_t
efx_tx_qpace(
@@ -2260,35 +2286,43 @@ efx_tx_qdestroy(
/* Filter is for TX */
#define EFX_FILTER_FLAG_TX 0x10
-typedef unsigned int efx_filter_flags_t;
+typedef uint8_t efx_filter_flags_t;
/*
* Flags which specify the fields to match on. The values are the same as in the
* MC_CMD_FILTER_OP/MC_CMD_FILTER_OP_EXT commands.
*/
-typedef enum efx_filter_match_flags_e {
- EFX_FILTER_MATCH_REM_HOST = 0x0001, /* Match by remote IP host
- * address */
- EFX_FILTER_MATCH_LOC_HOST = 0x0002, /* Match by local IP host
- * address */
- EFX_FILTER_MATCH_REM_MAC = 0x0004, /* Match by remote MAC address */
- EFX_FILTER_MATCH_REM_PORT = 0x0008, /* Match by remote TCP/UDP port */
- EFX_FILTER_MATCH_LOC_MAC = 0x0010, /* Match by remote TCP/UDP port */
- EFX_FILTER_MATCH_LOC_PORT = 0x0020, /* Match by local TCP/UDP port */
- EFX_FILTER_MATCH_ETHER_TYPE = 0x0040, /* Match by Ether-type */
- EFX_FILTER_MATCH_INNER_VID = 0x0080, /* Match by inner VLAN ID */
- EFX_FILTER_MATCH_OUTER_VID = 0x0100, /* Match by outer VLAN ID */
- EFX_FILTER_MATCH_IP_PROTO = 0x0200, /* Match by IP transport
- * protocol */
- /* For encapsulated packets, match all multicast inner frames */
- EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST = 0x01000000,
- /* For encapsulated packets, match all unicast inner frames */
- EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST = 0x02000000,
- /* Match otherwise-unmatched multicast and broadcast packets */
- EFX_FILTER_MATCH_UNKNOWN_MCAST_DST = 0x40000000,
- /* Match otherwise-unmatched unicast packets */
- EFX_FILTER_MATCH_UNKNOWN_UCAST_DST = 0x80000000,
-} efx_filter_match_flags_t;
+
+/* Match by remote IP host address */
+#define EFX_FILTER_MATCH_REM_HOST 0x00000001
+/* Match by local IP host address */
+#define EFX_FILTER_MATCH_LOC_HOST 0x00000002
+/* Match by remote MAC address */
+#define EFX_FILTER_MATCH_REM_MAC 0x00000004
+/* Match by remote TCP/UDP port */
+#define EFX_FILTER_MATCH_REM_PORT 0x00000008
+/* Match by local MAC address */
+#define EFX_FILTER_MATCH_LOC_MAC 0x00000010
+/* Match by local TCP/UDP port */
+#define EFX_FILTER_MATCH_LOC_PORT 0x00000020
+/* Match by Ether-type */
+#define EFX_FILTER_MATCH_ETHER_TYPE 0x00000040
+/* Match by inner VLAN ID */
+#define EFX_FILTER_MATCH_INNER_VID 0x00000080
+/* Match by outer VLAN ID */
+#define EFX_FILTER_MATCH_OUTER_VID 0x00000100
+/* Match by IP transport protocol */
+#define EFX_FILTER_MATCH_IP_PROTO 0x00000200
+/* For encapsulated packets, match all multicast inner frames */
+#define EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 0x01000000
+/* For encapsulated packets, match all unicast inner frames */
+#define EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST 0x02000000
+/* Match otherwise-unmatched multicast and broadcast packets */
+#define EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 0x40000000
+/* Match otherwise-unmatched unicast packets */
+#define EFX_FILTER_MATCH_UNKNOWN_UCAST_DST 0x80000000
+
+typedef uint32_t efx_filter_match_flags_t;
typedef enum efx_filter_priority_s {
EFX_FILTER_PRI_HINT = 0, /* Performance hint */
@@ -2309,22 +2343,22 @@ typedef enum efx_filter_priority_s {
*/
typedef struct efx_filter_spec_s {
- uint32_t efs_match_flags;
- uint32_t efs_priority:2;
- uint32_t efs_flags:6;
- uint32_t efs_dmaq_id:12;
- uint32_t efs_rss_context;
- uint16_t efs_outer_vid;
- uint16_t efs_inner_vid;
- uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN];
- uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN];
- uint16_t efs_ether_type;
- uint8_t efs_ip_proto;
- efx_tunnel_protocol_t efs_encap_type;
- uint16_t efs_loc_port;
- uint16_t efs_rem_port;
- efx_oword_t efs_rem_host;
- efx_oword_t efs_loc_host;
+ efx_filter_match_flags_t efs_match_flags;
+ uint8_t efs_priority;
+ efx_filter_flags_t efs_flags;
+ uint16_t efs_dmaq_id;
+ uint32_t efs_rss_context;
+ uint16_t efs_outer_vid;
+ uint16_t efs_inner_vid;
+ uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN];
+ uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN];
+ uint16_t efs_ether_type;
+ uint8_t efs_ip_proto;
+ efx_tunnel_protocol_t efs_encap_type;
+ uint16_t efs_loc_port;
+ uint16_t efs_rem_port;
+ efx_oword_t efs_rem_host;
+ efx_oword_t efs_loc_host;
} efx_filter_spec_t;
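Since the match flags are now plain defines combined into a 32-bit efx_filter_match_flags_t, a caller builds a spec by ORing them together and filling the corresponding fields. A sketch matching a local MAC address and the IPv4 Ether-type, using only names visible in this header (mac_addr is a caller-supplied array; memset/memcpy from <string.h> are assumed available):

    efx_filter_spec_t spec;

    memset(&spec, 0, sizeof (spec));
    spec.efs_match_flags = EFX_FILTER_MATCH_LOC_MAC |
        EFX_FILTER_MATCH_ETHER_TYPE;
    spec.efs_priority = EFX_FILTER_PRI_HINT;
    spec.efs_ether_type = 0x0800;    /* IPv4 */
    memcpy(spec.efs_loc_mac, mac_addr, EFX_MAC_ADDR_LEN);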
@@ -2500,8 +2534,7 @@ efx_lic_find_start(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
- __out uint32_t *startp
- );
+ __out uint32_t *startp);
extern __checkReturn efx_rc_t
efx_lic_find_end(
@@ -2510,8 +2543,7 @@ efx_lic_find_end(
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
- __out uint32_t *endp
- );
+ __out uint32_t *endp);
extern __checkReturn __success(return != B_FALSE) boolean_t
efx_lic_find_key(
@@ -2521,15 +2553,13 @@ efx_lic_find_key(
__in size_t buffer_size,
__in uint32_t offset,
__out uint32_t *startp,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
extern __checkReturn __success(return != B_FALSE) boolean_t
efx_lic_validate_key(
__in efx_nic_t *enp,
__in_bcount(length) caddr_t keyp,
- __in uint32_t length
- );
+ __in uint32_t length);
extern __checkReturn efx_rc_t
efx_lic_read_key(
@@ -2542,8 +2572,7 @@ efx_lic_read_key(
__out_bcount_part(key_max_size, *lengthp)
caddr_t keyp,
__in size_t key_max_size,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
extern __checkReturn efx_rc_t
efx_lic_write_key(
@@ -2554,8 +2583,7 @@ efx_lic_write_key(
__in uint32_t offset,
__in_bcount(length) caddr_t keyp,
__in uint32_t length,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
__checkReturn efx_rc_t
efx_lic_delete_key(
@@ -2566,27 +2594,70 @@ efx_lic_delete_key(
__in uint32_t offset,
__in uint32_t length,
__in uint32_t end,
- __out uint32_t *deltap
- );
+ __out uint32_t *deltap);
extern __checkReturn efx_rc_t
efx_lic_create_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- );
+ __in size_t buffer_size);
extern __checkReturn efx_rc_t
efx_lic_finish_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- );
+ __in size_t buffer_size);
#endif /* EFSYS_OPT_LICENSING */
+/* TUNNEL */
+
+#if EFSYS_OPT_TUNNEL
+
+extern __checkReturn efx_rc_t
+efx_tunnel_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_tunnel_fini(
+ __in efx_nic_t *enp);
+
+/*
+ * For overlay network encapsulation using UDP, the firmware needs to know
+ * the configured UDP port for the overlay so it can decode encapsulated
+ * frames correctly.
+ * The UDP port/protocol list is global.
+ */
+
+extern __checkReturn efx_rc_t
+efx_tunnel_config_udp_add(
+ __in efx_nic_t *enp,
+ __in uint16_t port /* host/cpu-endian */,
+ __in efx_tunnel_protocol_t protocol);
+
+extern __checkReturn efx_rc_t
+efx_tunnel_config_udp_remove(
+ __in efx_nic_t *enp,
+ __in uint16_t port /* host/cpu-endian */,
+ __in efx_tunnel_protocol_t protocol);
+
+extern void
+efx_tunnel_config_clear(
+ __in efx_nic_t *enp);
+
+/**
+ * Apply the tunnel UDP port configuration to the hardware.
+ *
+ * EAGAIN is returned if the hardware will be reset (datapath and
+ * management CPU reboot).
+ */
+extern __checkReturn efx_rc_t
+efx_tunnel_reconfigure(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_TUNNEL */
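A typical start-up sequence for a driver offloading VXLAN decapsulation is to register the UDP port, push the configuration, and tolerate the reset that EAGAIN announces. A hedged sketch — the EFX_TUNNEL_PROTOCOL_VXLAN enumerator of efx_tunnel_protocol_t and the IANA port 4789 are assumptions, not shown in this hunk:

    efx_rc_t rc;

    if ((rc = efx_tunnel_init(enp)) != 0)
        return (rc);

    /* Port is host/cpu-endian, per the prototype above */
    rc = efx_tunnel_config_udp_add(enp, 4789 /* IANA VXLAN */,
        EFX_TUNNEL_PROTOCOL_VXLAN);
    if (rc != 0)
        return (rc);

    rc = efx_tunnel_reconfigure(enp);
    if (rc == EAGAIN) {
        /*
         * Datapath and management CPU reboot follows; wait for the
         * NIC to come back before resuming I/O.
         */
    } else if (rc != 0) {
        return (rc);
    }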
#ifdef __cplusplus
diff --git a/drivers/net/sfc/base/efx_bootcfg.c b/drivers/net/sfc/base/efx_bootcfg.c
index d589c86a..0f71936f 100644
--- a/drivers/net/sfc/base/efx_bootcfg.c
+++ b/drivers/net/sfc/base/efx_bootcfg.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -290,7 +266,7 @@ fail1:
efx_rc_t
efx_bootcfg_read(
__in efx_nic_t *enp,
- __out_bcount(size) caddr_t data,
+ __out_bcount(size) uint8_t *data,
__in size_t size)
{
uint8_t *payload = NULL;
@@ -345,18 +321,18 @@ efx_bootcfg_read(
if ((rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
sector_offset, (caddr_t)payload, sector_length)) != 0) {
- (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+ (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL);
goto fail6;
}
- if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
goto fail7;
/* Verify that the area is correctly formatted and checksummed */
- rc = efx_bootcfg_verify(enp, (caddr_t)payload, sector_length,
+ rc = efx_bootcfg_verify(enp, payload, sector_length,
&used_bytes);
if (rc != 0 || used_bytes == 0) {
- payload[0] = (uint8_t)~DHCP_END;
+ payload[0] = (uint8_t)(~DHCP_END & 0xff);
payload[1] = DHCP_END;
used_bytes = 2;
}
@@ -430,7 +406,7 @@ fail1:
efx_rc_t
efx_bootcfg_write(
__in efx_nic_t *enp,
- __in_bcount(size) caddr_t data,
+ __in_bcount(size) uint8_t *data,
__in size_t size)
{
uint8_t *partn_data;
@@ -521,7 +497,7 @@ efx_bootcfg_write(
0, (caddr_t)partn_data, partn_length)) != 0)
goto fail11;
- if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
goto fail12;
EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data);
@@ -537,7 +513,7 @@ fail10:
fail9:
EFSYS_PROBE(fail9);
- (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+ (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL);
fail8:
EFSYS_PROBE(fail8);
diff --git a/drivers/net/sfc/base/efx_check.h b/drivers/net/sfc/base/efx_check.h
index c8548c04..58377759 100644
--- a/drivers/net/sfc/base/efx_check.h
+++ b/drivers/net/sfc/base/efx_check.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_EFX_CHECK_H
@@ -224,8 +200,8 @@
#if EFSYS_OPT_PHY_LED_CONTROL
/* Support for PHY LED control */
-# if !EFSYS_OPT_SIENA
-# error "PHY_LED_CONTROL requires SIENA"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "PHY_LED_CONTROL requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_PHY_LED_CONTROL */
@@ -259,8 +235,8 @@
#if EFSYS_OPT_PHY_STATS
/* Support PHY statistics */
-# if !EFSYS_OPT_SIENA
-# error "PHY_STATS requires SIENA"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "PHY_STATS requires SIENA or HUNTINGTON or MEDFORD"
# endif
#endif /* EFSYS_OPT_PHY_STATS */
@@ -343,4 +319,11 @@
# endif
#endif
+/* Support hardware assistance for tunnels */
+#if EFSYS_OPT_TUNNEL
+# if !EFSYS_OPT_MEDFORD
+# error "TUNNEL requires MEDFORD"
+# endif
+#endif /* EFSYS_OPT_TUNNEL */
+
#endif /* _SYS_EFX_CHECK_H */
diff --git a/drivers/net/sfc/base/efx_crc32.c b/drivers/net/sfc/base/efx_crc32.c
index 27e2708a..29c02e19 100644
--- a/drivers/net/sfc/base/efx_crc32.c
+++ b/drivers/net/sfc/base/efx_crc32.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2013-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2013-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/efx_ev.c b/drivers/net/sfc/base/efx_ev.c
index 42ded5aa..949d352a 100644
--- a/drivers/net/sfc/base/efx_ev.c
+++ b/drivers/net/sfc/base/efx_ev.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -65,7 +41,7 @@ siena_ev_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
__in uint32_t us,
__in uint32_t flags,
@@ -216,21 +192,21 @@ efx_ev_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
__in uint32_t us,
__in uint32_t flags,
__deref_out efx_evq_t **eepp)
{
const efx_ev_ops_t *eevop = enp->en_eevop;
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_evq_t *eep;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
- EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit);
+ EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
+ enp->en_nic_cfg.enc_evq_limit);
switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
@@ -256,7 +232,7 @@ efx_ev_qcreate(
eep->ee_magic = EFX_EVQ_MAGIC;
eep->ee_enp = enp;
eep->ee_index = index;
- eep->ee_mask = n - 1;
+ eep->ee_mask = ndescs - 1;
eep->ee_flags = flags;
eep->ee_esmp = esmp;
@@ -271,7 +247,7 @@ efx_ev_qcreate(
enp->en_ev_qcount++;
*eepp = eep;
- if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, us, flags,
+ if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
eep)) != 0)
goto fail4;
@@ -1281,7 +1257,7 @@ siena_ev_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
__in uint32_t us,
__in uint32_t flags,
@@ -1298,7 +1274,8 @@ siena_ev_qcreate(
EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
- if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+ if (!ISP2(ndescs) ||
+ (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
rc = EINVAL;
goto fail1;
}
@@ -1315,7 +1292,7 @@ siena_ev_qcreate(
#endif
for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
size++)
- if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS))
+ if ((1 << size) == (int)(ndescs / EFX_EVQ_MINNEVS))
break;
if (id + (1 << size) >= encp->enc_buftbl_limit) {
rc = EINVAL;
@@ -1416,6 +1393,8 @@ efx_ev_qstat_name(
__in efx_nic_t *enp,
__in unsigned int id)
{
+ _NOTE(ARGUNUSED(enp))
+
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(id, <, EV_NQSTATS);
diff --git a/drivers/net/sfc/base/efx_filter.c b/drivers/net/sfc/base/efx_filter.c
index 5cab7d87..b92541aa 100644
--- a/drivers/net/sfc/base/efx_filter.c
+++ b/drivers/net/sfc/base/efx_filter.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/efx_hash.c b/drivers/net/sfc/base/efx_hash.c
index 3cc0d200..43e6bc05 100644
--- a/drivers/net/sfc/base/efx_hash.c
+++ b/drivers/net/sfc/base/efx_hash.c
@@ -1,4 +1,5 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
* Copyright 2006 Bob Jenkins
*
* Derived from public domain source, see
@@ -10,33 +11,8 @@
* ...You can use this free for any purpose. It's in the public domain.
* It has no warranty."
*
- * Copyright (c) 2014-2016 Solarflare Communications Inc.
+ * Copyright (c) 2014-2018 Solarflare Communications Inc.
* All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/efx_impl.h b/drivers/net/sfc/base/efx_impl.h
index 53fa37ac..ed685cba 100644
--- a/drivers/net/sfc/base/efx_impl.h
+++ b/drivers/net/sfc/base/efx_impl.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_EFX_IMPL_H
@@ -74,6 +50,7 @@ extern "C" {
#define EFX_MOD_MON 0x00000400
#define EFX_MOD_FILTER 0x00001000
#define EFX_MOD_LIC 0x00002000
+#define EFX_MOD_TUNNEL 0x00004000
#define EFX_RESET_PHY 0x00000001
#define EFX_RESET_RXQ_ERR 0x00000002
@@ -173,7 +150,7 @@ typedef struct efx_rx_ops_s {
unsigned int);
void (*erxo_qpush)(efx_rxq_t *, unsigned int, unsigned int *);
#if EFSYS_OPT_RX_PACKED_STREAM
- void (*erxo_qps_update_credits)(efx_rxq_t *);
+ void (*erxo_qpush_ps_credits)(efx_rxq_t *);
uint8_t * (*erxo_qps_packet_info)(efx_rxq_t *, uint8_t *,
uint32_t, uint32_t,
uint16_t *, uint32_t *, uint32_t *);
@@ -181,8 +158,9 @@ typedef struct efx_rx_ops_s {
efx_rc_t (*erxo_qflush)(efx_rxq_t *);
void (*erxo_qenable)(efx_rxq_t *);
efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int,
- unsigned int, efx_rxq_type_t,
+ unsigned int, efx_rxq_type_t, uint32_t,
efsys_mem_t *, size_t, uint32_t,
+ unsigned int,
efx_evq_t *, efx_rxq_t *);
void (*erxo_qdestroy)(efx_rxq_t *);
} efx_rx_ops_t;
@@ -261,6 +239,12 @@ efx_filter_reconfigure(
#endif /* EFSYS_OPT_FILTER */
+#if EFSYS_OPT_TUNNEL
+typedef struct efx_tunnel_ops_s {
+ boolean_t (*eto_udp_encap_supported)(efx_nic_t *);
+ efx_rc_t (*eto_reconfigure)(efx_nic_t *);
+} efx_tunnel_ops_t;
+#endif /* EFSYS_OPT_TUNNEL */
typedef struct efx_port_s {
efx_mac_type_t ep_mac_type;
@@ -296,7 +280,6 @@ typedef struct efx_port_s {
uint32_t ep_default_adv_cap_mask;
uint32_t ep_phy_cap_mask;
boolean_t ep_mac_drain;
- boolean_t ep_mac_stats_pending;
#if EFSYS_OPT_BIST
efx_bist_type_t ep_current_bist;
#endif
@@ -358,12 +341,7 @@ typedef struct efx_nic_ops_s {
#ifndef EFX_RXQ_LIMIT_TARGET
#define EFX_RXQ_LIMIT_TARGET 512
#endif
-#ifndef EFX_TXQ_DC_SIZE
-#define EFX_TXQ_DC_SIZE 1 /* 16 descriptors */
-#endif
-#ifndef EFX_RXQ_DC_SIZE
-#define EFX_RXQ_DC_SIZE 3 /* 64 descriptors */
-#endif
+
#if EFSYS_OPT_FILTER
@@ -438,6 +416,22 @@ siena_filter_tbl_clear(
#if EFSYS_OPT_MCDI
+#define EFX_TUNNEL_MAXNENTRIES (16)
+
+#if EFSYS_OPT_TUNNEL
+
+typedef struct efx_tunnel_udp_entry_s {
+ uint16_t etue_port; /* host/cpu-endian */
+ uint16_t etue_protocol;
+} efx_tunnel_udp_entry_t;
+
+typedef struct efx_tunnel_cfg_s {
+ efx_tunnel_udp_entry_t etc_udp_entries[EFX_TUNNEL_MAXNENTRIES];
+ unsigned int etc_udp_entries_num;
+} efx_tunnel_cfg_t;
+
+#endif /* EFSYS_OPT_TUNNEL */
+
typedef struct efx_mcdi_ops_s {
efx_rc_t (*emco_init)(efx_nic_t *, const efx_mcdi_transport_t *);
void (*emco_send_request)(efx_nic_t *, void *, size_t,
@@ -461,6 +455,10 @@ typedef struct efx_mcdi_s {
#endif /* EFSYS_OPT_MCDI */
#if EFSYS_OPT_NVRAM
+
+/* Invalid partition ID for en_nvram_partn_locked field of efx_nic_t */
+#define EFX_NVRAM_PARTN_INVALID (0xffffffffu)
+
typedef struct efx_nvram_ops_s {
#if EFSYS_OPT_DIAG
efx_rc_t (*envo_test)(efx_nic_t *);
@@ -471,11 +469,14 @@ typedef struct efx_nvram_ops_s {
efx_rc_t (*envo_partn_rw_start)(efx_nic_t *, uint32_t, size_t *);
efx_rc_t (*envo_partn_read)(efx_nic_t *, uint32_t,
unsigned int, caddr_t, size_t);
+ efx_rc_t (*envo_partn_read_backup)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
efx_rc_t (*envo_partn_erase)(efx_nic_t *, uint32_t,
unsigned int, size_t);
efx_rc_t (*envo_partn_write)(efx_nic_t *, uint32_t,
unsigned int, caddr_t, size_t);
- efx_rc_t (*envo_partn_rw_finish)(efx_nic_t *, uint32_t);
+ efx_rc_t (*envo_partn_rw_finish)(efx_nic_t *, uint32_t,
+ uint32_t *);
efx_rc_t (*envo_partn_get_version)(efx_nic_t *, uint32_t,
uint32_t *, uint16_t *);
efx_rc_t (*envo_partn_set_version)(efx_nic_t *, uint32_t,
@@ -564,7 +565,7 @@ efx_mcdi_nvram_update_finish(
__in efx_nic_t *enp,
__in uint32_t partn,
__in boolean_t reboot,
- __out_opt uint32_t *resultp);
+ __out_opt uint32_t *verify_resultp);
#if EFSYS_OPT_DIAG
@@ -643,11 +644,15 @@ struct efx_nic_s {
efx_filter_t en_filter;
const efx_filter_ops_t *en_efop;
#endif /* EFSYS_OPT_FILTER */
+#if EFSYS_OPT_TUNNEL
+ efx_tunnel_cfg_t en_tunnel_cfg;
+ const efx_tunnel_ops_t *en_etop;
+#endif /* EFSYS_OPT_TUNNEL */
#if EFSYS_OPT_MCDI
efx_mcdi_t en_mcdi;
#endif /* EFSYS_OPT_MCDI */
#if EFSYS_OPT_NVRAM
- efx_nvram_type_t en_nvram_locked;
+ uint32_t en_nvram_partn_locked;
const efx_nvram_ops_t *en_envop;
#endif /* EFSYS_OPT_NVRAM */
#if EFSYS_OPT_VPD
@@ -754,6 +759,7 @@ struct efx_rxq_s {
unsigned int er_label;
unsigned int er_mask;
efsys_mem_t *er_esmp;
+ efx_evq_rxq_state_t *er_ev_qstate;
};
#define EFX_RXQ_MAGIC 0x15022005
@@ -1036,8 +1042,7 @@ struct efx_txq_s {
do { \
EFX_CHECK_REG((_enp), (_reg)); \
EFSYS_PROBE7(efx_bar_tbl_doorbell_writeo, \
- const char *, \
- #_reg, \
+ const char *, #_reg, \
uint32_t, (_index), \
uint32_t, _reg ## _OFST, \
uint32_t, (_eop)->eo_u32[3], \
@@ -1072,10 +1077,6 @@ struct efx_txq_s {
} while (B_FALSE)
extern __checkReturn efx_rc_t
-efx_nic_biu_test(
- __in efx_nic_t *enp);
-
-extern __checkReturn efx_rc_t
efx_mac_select(
__in efx_nic_t *enp);
@@ -1143,32 +1144,6 @@ efx_vpd_hunk_set(
#endif /* EFSYS_OPT_VPD */
-#if EFSYS_OPT_DIAG
-
-extern efx_sram_pattern_fn_t __efx_sram_pattern_fns[];
-
-typedef struct efx_register_set_s {
- unsigned int address;
- unsigned int step;
- unsigned int rows;
- efx_oword_t mask;
-} efx_register_set_t;
-
-extern __checkReturn efx_rc_t
-efx_nic_test_registers(
- __in efx_nic_t *enp,
- __in efx_register_set_t *rsp,
- __in size_t count);
-
-extern __checkReturn efx_rc_t
-efx_nic_test_tables(
- __in efx_nic_t *enp,
- __in efx_register_set_t *rsp,
- __in efx_pattern_type_t pattern,
- __in size_t count);
-
-#endif /* EFSYS_OPT_DIAG */
-
#if EFSYS_OPT_MCDI
extern __checkReturn efx_rc_t
diff --git a/drivers/net/sfc/base/efx_intr.c b/drivers/net/sfc/base/efx_intr.c
index f0422d53..83ca177d 100644
--- a/drivers/net/sfc/base/efx_intr.c
+++ b/drivers/net/sfc/base/efx_intr.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/efx_lic.c b/drivers/net/sfc/base/efx_lic.c
index 2cd05cc8..ad4d2211 100644
--- a/drivers/net/sfc/base/efx_lic.c
+++ b/drivers/net/sfc/base/efx_lic.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -43,8 +19,7 @@ efx_lic_v1v2_find_start(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
- __out uint32_t *startp
- );
+ __out uint32_t *startp);
__checkReturn efx_rc_t
efx_lic_v1v2_find_end(
@@ -53,8 +28,7 @@ efx_lic_v1v2_find_end(
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
- __out uint32_t *endp
- );
+ __out uint32_t *endp);
__checkReturn __success(return != B_FALSE) boolean_t
efx_lic_v1v2_find_key(
@@ -64,15 +38,13 @@ efx_lic_v1v2_find_key(
__in size_t buffer_size,
__in uint32_t offset,
__out uint32_t *startp,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
__checkReturn __success(return != B_FALSE) boolean_t
efx_lic_v1v2_validate_key(
__in efx_nic_t *enp,
__in_bcount(length) caddr_t keyp,
- __in uint32_t length
- );
+ __in uint32_t length);
__checkReturn efx_rc_t
efx_lic_v1v2_read_key(
@@ -85,8 +57,7 @@ efx_lic_v1v2_read_key(
__out_bcount_part(key_max_size, *lengthp)
caddr_t keyp,
__in size_t key_max_size,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
__checkReturn efx_rc_t
efx_lic_v1v2_write_key(
@@ -97,8 +68,7 @@ efx_lic_v1v2_write_key(
__in uint32_t offset,
__in_bcount(length) caddr_t keyp,
__in uint32_t length,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
__checkReturn efx_rc_t
efx_lic_v1v2_delete_key(
@@ -109,24 +79,21 @@ efx_lic_v1v2_delete_key(
__in uint32_t offset,
__in uint32_t length,
__in uint32_t end,
- __out uint32_t *deltap
- );
+ __out uint32_t *deltap);
__checkReturn efx_rc_t
efx_lic_v1v2_create_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- );
+ __in size_t buffer_size);
__checkReturn efx_rc_t
efx_lic_v1v2_finish_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- );
+ __in size_t buffer_size);
#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
@@ -227,8 +194,7 @@ efx_lic_v3_find_start(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
- __out uint32_t *startp
- );
+ __out uint32_t *startp);
__checkReturn efx_rc_t
efx_lic_v3_find_end(
@@ -237,8 +203,7 @@ efx_lic_v3_find_end(
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
- __out uint32_t *endp
- );
+ __out uint32_t *endp);
__checkReturn __success(return != B_FALSE) boolean_t
efx_lic_v3_find_key(
@@ -248,15 +213,13 @@ efx_lic_v3_find_key(
__in size_t buffer_size,
__in uint32_t offset,
__out uint32_t *startp,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
__checkReturn __success(return != B_FALSE) boolean_t
efx_lic_v3_validate_key(
__in efx_nic_t *enp,
__in_bcount(length) caddr_t keyp,
- __in uint32_t length
- );
+ __in uint32_t length);
__checkReturn efx_rc_t
efx_lic_v3_read_key(
@@ -269,8 +232,7 @@ efx_lic_v3_read_key(
__out_bcount_part(key_max_size, *lengthp)
caddr_t keyp,
__in size_t key_max_size,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
__checkReturn efx_rc_t
efx_lic_v3_write_key(
@@ -281,8 +243,7 @@ efx_lic_v3_write_key(
__in uint32_t offset,
__in_bcount(length) caddr_t keyp,
__in uint32_t length,
- __out uint32_t *lengthp
- );
+ __out uint32_t *lengthp);
__checkReturn efx_rc_t
efx_lic_v3_delete_key(
@@ -293,31 +254,28 @@ efx_lic_v3_delete_key(
__in uint32_t offset,
__in uint32_t length,
__in uint32_t end,
- __out uint32_t *deltap
- );
+ __out uint32_t *deltap);
__checkReturn efx_rc_t
efx_lic_v3_create_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- );
+ __in size_t buffer_size);
__checkReturn efx_rc_t
efx_lic_v3_finish_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- );
+ __in size_t buffer_size);
static const efx_lic_ops_t __efx_lic_v3_ops = {
efx_mcdi_licensing_v3_update_licenses, /* elo_update_licenses */
efx_mcdi_licensing_v3_report_license, /* elo_get_key_stats */
efx_mcdi_licensing_v3_app_state, /* elo_app_state */
efx_mcdi_licensing_v3_get_id, /* elo_get_id */
- efx_lic_v3_find_start, /* elo_find_start*/
+ efx_lic_v3_find_start, /* elo_find_start */
efx_lic_v3_find_end, /* elo_find_end */
efx_lic_v3_find_key, /* elo_find_key */
efx_lic_v3_validate_key, /* elo_validate_key */
@@ -452,8 +410,8 @@ fail1:
* Length (L): 16bit - value length in bytes
* Value (V): L bytes - payload
*/
-#define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256)
-#define EFX_LICENSE_V1V2_HEADER_LENGTH (2 * sizeof(uint16_t))
+#define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256)
+#define EFX_LICENSE_V1V2_HEADER_LENGTH (2 * sizeof (uint16_t))
__checkReturn efx_rc_t
efx_lic_v1v2_find_start(
@@ -461,8 +419,7 @@ efx_lic_v1v2_find_start(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
- __out uint32_t *startp
- )
+ __out uint32_t *startp)
{
_NOTE(ARGUNUSED(enp, bufferp, buffer_size))
@@ -477,8 +434,7 @@ efx_lic_v1v2_find_end(
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
- __out uint32_t *endp
- )
+ __out uint32_t *endp)
{
_NOTE(ARGUNUSED(enp, bufferp, buffer_size))
@@ -494,8 +450,7 @@ efx_lic_v1v2_find_key(
__in size_t buffer_size,
__in uint32_t offset,
__out uint32_t *startp,
- __out uint32_t *lengthp
- )
+ __out uint32_t *lengthp)
{
boolean_t found;
uint16_t tlv_type;
@@ -519,7 +474,7 @@ efx_lic_v1v2_find_key(
return (found);
fail1:
- EFSYS_PROBE(fail1);
+ EFSYS_PROBE1(fail1, boolean_t, B_FALSE);
return (B_FALSE);
}
@@ -528,8 +483,7 @@ fail1:
efx_lic_v1v2_validate_key(
__in efx_nic_t *enp,
__in_bcount(length) caddr_t keyp,
- __in uint32_t length
- )
+ __in uint32_t length)
{
uint16_t tlv_type;
uint16_t tlv_length;
@@ -562,7 +516,7 @@ fail3:
fail2:
EFSYS_PROBE(fail2);
fail1:
- EFSYS_PROBE(fail1);
+ EFSYS_PROBE1(fail1, boolean_t, B_FALSE);
return (B_FALSE);
}
@@ -579,12 +533,11 @@ efx_lic_v1v2_read_key(
__out_bcount_part(key_max_size, *lengthp)
caddr_t keyp,
__in size_t key_max_size,
- __out uint32_t *lengthp
- )
+ __out uint32_t *lengthp)
{
efx_rc_t rc;
- _NOTE(ARGUNUSED(enp))
+ _NOTE(ARGUNUSED(enp, buffer_size))
EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
EFX_LICENSE_V1V2_HEADER_LENGTH));
@@ -613,8 +566,7 @@ efx_lic_v1v2_write_key(
__in uint32_t offset,
__in_bcount(length) caddr_t keyp,
__in uint32_t length,
- __out uint32_t *lengthp
- )
+ __out uint32_t *lengthp)
{
efx_rc_t rc;
@@ -650,13 +602,12 @@ efx_lic_v1v2_delete_key(
__in uint32_t offset,
__in uint32_t length,
__in uint32_t end,
- __out uint32_t *deltap
- )
+ __out uint32_t *deltap)
{
uint32_t move_start = offset + length;
uint32_t move_length = end - move_start;
- _NOTE(ARGUNUSED(enp))
+ _NOTE(ARGUNUSED(enp, buffer_size))
EFSYS_ASSERT(end <= buffer_size);
/* Shift everything after the key down */
@@ -672,10 +623,9 @@ efx_lic_v1v2_create_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- )
+ __in size_t buffer_size)
{
- _NOTE(ARGUNUSED(enp))
+ _NOTE(ARGUNUSED(enp, buffer_size))
EFSYS_ASSERT(EFX_LICENSE_V1V2_HEADER_LENGTH <= buffer_size);
/* Write terminator */
@@ -689,8 +639,7 @@ efx_lic_v1v2_finish_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- )
+ __in size_t buffer_size)
{
_NOTE(ARGUNUSED(enp, bufferp, buffer_size))
@@ -1002,7 +951,8 @@ efx_mcdi_licensing_v3_app_state(
goto fail1;
}
- if (req.emr_out_length_used < MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) {
+ if (req.emr_out_length_used <
+ MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) {
rc = EMSGSIZE;
goto fail2;
}
@@ -1052,7 +1002,8 @@ efx_mcdi_licensing_v3_get_id(
req.emr_in_buf = bufferp;
req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
req.emr_out_buf = bufferp;
- req.emr_out_length = MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
+ req.emr_out_length =
+ MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
(void) memset(bufferp, 0, req.emr_out_length);
}
@@ -1069,12 +1020,14 @@ efx_mcdi_licensing_v3_get_id(
}
*typep = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_TYPE);
- *lengthp = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
+ *lengthp =
+ MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
if (bufferp == NULL) {
- /* modify length requirements to indicate to caller the extra buffering
- ** needed to read the complete output.
- */
+ /*
+ * Modify length requirements to indicate to caller the extra
+ * buffering needed to read the complete output.
+ */
*lengthp += MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
} else {
/* Shift ID down to start of buffer */
@@ -1096,8 +1049,8 @@ fail1:
}
/* V3 format uses Huntington TLV format partition. See SF-108797-SW */
-#define EFX_LICENSE_V3_KEY_LENGTH_MIN (64)
-#define EFX_LICENSE_V3_KEY_LENGTH_MAX (160)
+#define EFX_LICENSE_V3_KEY_LENGTH_MIN (64)
+#define EFX_LICENSE_V3_KEY_LENGTH_MAX (160)
__checkReturn efx_rc_t
efx_lic_v3_find_start(
@@ -1105,12 +1058,12 @@ efx_lic_v3_find_start(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
- __out uint32_t *startp
- )
+ __out uint32_t *startp)
{
_NOTE(ARGUNUSED(enp))
- return ef10_nvram_buffer_find_item_start(bufferp, buffer_size, startp);
+ return (ef10_nvram_buffer_find_item_start(bufferp, buffer_size,
+ startp));
}
__checkReturn efx_rc_t
@@ -1120,12 +1073,11 @@ efx_lic_v3_find_end(
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
- __out uint32_t *endp
- )
+ __out uint32_t *endp)
{
_NOTE(ARGUNUSED(enp))
- return ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp);
+ return (ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp));
}
__checkReturn __success(return != B_FALSE) boolean_t
@@ -1136,8 +1088,7 @@ efx_lic_v3_find_key(
__in size_t buffer_size,
__in uint32_t offset,
__out uint32_t *startp,
- __out uint32_t *lengthp
- )
+ __out uint32_t *lengthp)
{
_NOTE(ARGUNUSED(enp))
@@ -1149,8 +1100,7 @@ efx_lic_v3_find_key(
efx_lic_v3_validate_key(
__in efx_nic_t *enp,
__in_bcount(length) caddr_t keyp,
- __in uint32_t length
- )
+ __in uint32_t length)
{
/* Check key is a valid V3 key */
uint8_t key_type;
@@ -1184,7 +1134,7 @@ fail3:
fail2:
EFSYS_PROBE(fail2);
fail1:
- EFSYS_PROBE(fail1);
+ EFSYS_PROBE1(fail1, boolean_t, B_FALSE);
return (B_FALSE);
}
@@ -1200,8 +1150,7 @@ efx_lic_v3_read_key(
__out_bcount_part(key_max_size, *lengthp)
caddr_t keyp,
__in size_t key_max_size,
- __out uint32_t *lengthp
- )
+ __out uint32_t *lengthp)
{
_NOTE(ARGUNUSED(enp))
@@ -1218,8 +1167,7 @@ efx_lic_v3_write_key(
__in uint32_t offset,
__in_bcount(length) caddr_t keyp,
__in uint32_t length,
- __out uint32_t *lengthp
- )
+ __out uint32_t *lengthp)
{
_NOTE(ARGUNUSED(enp))
EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX);
@@ -1237,8 +1185,7 @@ efx_lic_v3_delete_key(
__in uint32_t offset,
__in uint32_t length,
__in uint32_t end,
- __out uint32_t *deltap
- )
+ __out uint32_t *deltap)
{
efx_rc_t rc;
@@ -1264,8 +1211,7 @@ efx_lic_v3_create_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- )
+ __in size_t buffer_size)
{
efx_rc_t rc;
@@ -1290,8 +1236,7 @@ efx_lic_v3_finish_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- )
+ __in size_t buffer_size)
{
efx_rc_t rc;
@@ -1383,7 +1328,7 @@ efx_lic_check_support(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
- return enp->en_licensing_supported;
+ return (enp->en_licensing_supported);
}
void
@@ -1474,8 +1419,7 @@ efx_lic_get_id(
__in size_t buffer_size,
__out uint32_t *typep,
__out size_t *lengthp,
- __out_opt uint8_t *bufferp
- )
+ __out_opt uint8_t *bufferp)
{
const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
@@ -1498,7 +1442,10 @@ fail1:
return (rc);
}
-/* Buffer management API - abstracts varying TLV format used for License partition */
+/*
+ * Buffer management API - abstracts varying TLV format used for License
+ * partition.
+ */
__checkReturn efx_rc_t
efx_lic_find_start(
@@ -1506,8 +1453,7 @@ efx_lic_find_start(
__in_bcount(buffer_size)
caddr_t bufferp,
__in size_t buffer_size,
- __out uint32_t *startp
- )
+ __out uint32_t *startp)
{
const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
@@ -1533,8 +1479,7 @@ efx_lic_find_end(
caddr_t bufferp,
__in size_t buffer_size,
__in uint32_t offset,
- __out uint32_t *endp
- )
+ __out uint32_t *endp)
{
const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
@@ -1542,7 +1487,8 @@ efx_lic_find_end(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
- if ((rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp)) != 0)
+ rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp);
+ if (rc != 0)
goto fail1;
return (0);
@@ -1561,8 +1507,7 @@ efx_lic_find_key(
__in size_t buffer_size,
__in uint32_t offset,
__out uint32_t *startp,
- __out uint32_t *lengthp
- )
+ __out uint32_t *lengthp)
{
const efx_lic_ops_t *elop = enp->en_elop;
@@ -1578,15 +1523,15 @@ efx_lic_find_key(
}
-/* Validate that the buffer contains a single key in a recognised format.
-** An empty or terminator buffer is not accepted as a valid key.
-*/
+/*
+ * Validate that the buffer contains a single key in a recognised format.
+ * An empty or terminator buffer is not accepted as a valid key.
+ */
__checkReturn __success(return != B_FALSE) boolean_t
efx_lic_validate_key(
__in efx_nic_t *enp,
__in_bcount(length) caddr_t keyp,
- __in uint32_t length
- )
+ __in uint32_t length)
{
const efx_lic_ops_t *elop = enp->en_elop;
boolean_t rc;
@@ -1616,8 +1561,7 @@ efx_lic_read_key(
__out_bcount_part(key_max_size, *lengthp)
caddr_t keyp,
__in size_t key_max_size,
- __out uint32_t *lengthp
- )
+ __out uint32_t *lengthp)
{
const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
@@ -1646,8 +1590,7 @@ efx_lic_write_key(
__in uint32_t offset,
__in_bcount(length) caddr_t keyp,
__in uint32_t length,
- __out uint32_t *lengthp
- )
+ __out uint32_t *lengthp)
{
const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
@@ -1676,8 +1619,7 @@ efx_lic_delete_key(
__in uint32_t offset,
__in uint32_t length,
__in uint32_t end,
- __out uint32_t *deltap
- )
+ __out uint32_t *deltap)
{
const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
@@ -1702,8 +1644,7 @@ efx_lic_create_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- )
+ __in size_t buffer_size)
{
const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
@@ -1728,8 +1669,7 @@ efx_lic_finish_partition(
__in efx_nic_t *enp,
__in_bcount(buffer_size)
caddr_t bufferp,
- __in size_t buffer_size
- )
+ __in size_t buffer_size)
{
const efx_lic_ops_t *elop = enp->en_elop;
efx_rc_t rc;
diff --git a/drivers/net/sfc/base/efx_mac.c b/drivers/net/sfc/base/efx_mac.c
index 752e7205..511f3eb5 100644
--- a/drivers/net/sfc/base/efx_mac.c
+++ b/drivers/net/sfc/base/efx_mac.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -40,7 +16,7 @@ siena_mac_multicast_list_set(
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_SIENA
-static const efx_mac_ops_t __efx_siena_mac_ops = {
+static const efx_mac_ops_t __efx_mac_siena_ops = {
siena_mac_poll, /* emo_poll */
siena_mac_up, /* emo_up */
siena_mac_reconfigure, /* emo_addr_set */
@@ -64,7 +40,7 @@ static const efx_mac_ops_t __efx_siena_mac_ops = {
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
-static const efx_mac_ops_t __efx_ef10_mac_ops = {
+static const efx_mac_ops_t __efx_mac_ef10_ops = {
ef10_mac_poll, /* emo_poll */
ef10_mac_up, /* emo_up */
ef10_mac_addr_set, /* emo_addr_set */
@@ -751,16 +727,9 @@ efx_mac_stats_upload(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
EFSYS_ASSERT(emop != NULL);
- /*
- * Don't assert !ep_mac_stats_pending, because the client might
- * have failed to finalise statistics when previously stopping
- * the port.
- */
if ((rc = emop->emo_stats_upload(enp, esmp)) != 0)
goto fail1;
- epp->ep_mac_stats_pending = B_TRUE;
-
return (0);
fail1:
@@ -820,8 +789,6 @@ efx_mac_stats_update(
EFSYS_ASSERT(emop != NULL);
rc = emop->emo_stats_update(enp, esmp, essp, generationp);
- if (rc == 0)
- epp->ep_mac_stats_pending = B_FALSE;
return (rc);
}
@@ -840,21 +807,21 @@ efx_mac_select(
switch (enp->en_family) {
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
- emop = &__efx_siena_mac_ops;
+ emop = &__efx_mac_siena_ops;
type = EFX_MAC_SIENA;
break;
#endif /* EFSYS_OPT_SIENA */
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
- emop = &__efx_ef10_mac_ops;
+ emop = &__efx_mac_ef10_ops;
type = EFX_MAC_HUNTINGTON;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- emop = &__efx_ef10_mac_ops;
+ emop = &__efx_mac_ef10_ops;
type = EFX_MAC_MEDFORD;
break;
#endif /* EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/efx_mcdi.c b/drivers/net/sfc/base/efx_mcdi.c
index c61b943c..347a5b35 100644
--- a/drivers/net/sfc/base/efx_mcdi.c
+++ b/drivers/net/sfc/base/efx_mcdi.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2008-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2008-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -290,7 +266,8 @@ efx_mcdi_request_start(
*/
if ((max_version >= 2) &&
((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
- (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
+ (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1) ||
+ (emrp->emr_out_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
/* Construct MCDI v2 header */
hdr_len = sizeof (hdr);
EFX_POPULATE_DWORD_8(hdr[0],
@@ -792,9 +769,8 @@ efx_mcdi_ev_cpl(
emrp->emr_rc = 0;
}
}
- if (errcode == 0) {
+ if (emrp->emr_rc == 0)
efx_mcdi_finish_response(enp, emrp);
- }
emtp->emt_ev_cpl(emtp->emt_context);
}
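
The first hunk in this file widens the header-format decision: the response,
like the request, must fit within the v1 SDU, so an oversized expected output
now also forces an MCDI v2 header. The predicate, as a standalone sketch using
the names from the hunk above:

	boolean_t use_v2 = (max_version >= 2) &&
	    ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
	     (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1) ||
	     (emrp->emr_out_length > MCDI_CTL_SDU_LEN_MAX_V1));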
@@ -809,6 +785,8 @@ efx_mcdi_get_proxy_handle(
{
efx_rc_t rc;
+ _NOTE(ARGUNUSED(enp))
+
/*
* Return proxy handle from MCDI request that returned with error
 * MC_CMD_ERR_PROXY_PENDING. This handle is used to wait for a matching
diff --git a/drivers/net/sfc/base/efx_mcdi.h b/drivers/net/sfc/base/efx_mcdi.h
index 21727713..4e69f048 100644
--- a/drivers/net/sfc/base/efx_mcdi.h
+++ b/drivers/net/sfc/base/efx_mcdi.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_EFX_MCDI_H
@@ -51,7 +27,7 @@ struct efx_mcdi_req_s {
unsigned int emr_cmd;
uint8_t *emr_in_buf;
size_t emr_in_length;
- /* Outputs: retcode, buffer, length, and length used*/
+ /* Outputs: retcode, buffer, length, and length used */
efx_rc_t emr_rc;
uint8_t *emr_out_buf;
size_t emr_out_length;
@@ -382,6 +358,10 @@ efx_mcdi_phy_module_get_info(
EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \
EFX_WORD_0)
+#define MCDI_OUT_WORD_FIELD(_emr, _ofst, _field) \
+ EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \
+ MC_CMD_ ## _field)
+
#define MCDI_OUT_DWORD(_emr, _ofst) \
EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
EFX_DWORD_0)
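
The new MCDI_OUT_WORD_FIELD above extracts a named bitfield from a 16-bit
response word, pasting the MC_CMD_ prefix onto the field name. This same patch
uses it in efx_tunnel.c to read the RESETTING flag:

	resetting = MCDI_OUT_WORD_FIELD(req,
	    SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS,
	    SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING);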
diff --git a/drivers/net/sfc/base/efx_mon.c b/drivers/net/sfc/base/efx_mon.c
index c2f1e97e..234a4201 100644
--- a/drivers/net/sfc/base/efx_mon.c
+++ b/drivers/net/sfc/base/efx_mon.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -123,7 +99,7 @@ fail1:
#if EFSYS_OPT_NAMES
-/* START MKCONFIG GENERATED MonitorStatNamesBlock 5daa2a5725ba734b */
+/* START MKCONFIG GENERATED MonitorStatNamesBlock d92af1538001301f */
static const char * const __mon_stat_name[] = {
"value_2_5v",
"value_vccp1",
@@ -202,6 +178,8 @@ static const char * const __mon_stat_name[] = {
"controller_tdiode_temp",
"board_front_temp",
"board_back_temp",
+ "i1v8",
+ "i2v5",
};
/* END MKCONFIG GENERATED MonitorStatNamesBlock */
diff --git a/drivers/net/sfc/base/efx_nic.c b/drivers/net/sfc/base/efx_nic.c
index 76caa744..e318c17d 100644
--- a/drivers/net/sfc/base/efx_nic.c
+++ b/drivers/net/sfc/base/efx_nic.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -102,78 +78,6 @@ efx_family(
return (ENOTSUP);
}
-
-#define EFX_BIU_MAGIC0 0x01234567
-#define EFX_BIU_MAGIC1 0xfedcba98
-
- __checkReturn efx_rc_t
-efx_nic_biu_test(
- __in efx_nic_t *enp)
-{
- efx_oword_t oword;
- efx_rc_t rc;
-
- /*
- * Write magic values to scratch registers 0 and 1, then
- * verify that the values were written correctly. Interleave
- * the accesses to ensure that the BIU is not just reading
- * back the cached value that was last written.
- */
- EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
- EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
-
- EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
- EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
-
- EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
- if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
- rc = EIO;
- goto fail1;
- }
-
- EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
- if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
- rc = EIO;
- goto fail2;
- }
-
- /*
- * Perform the same test, with the values swapped. This
- * ensures that subsequent tests don't start with the correct
- * values already written into the scratch registers.
- */
- EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
- EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
-
- EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
- EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
-
- EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
- if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
- rc = EIO;
- goto fail3;
- }
-
- EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
- if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
- rc = EIO;
- goto fail4;
- }
-
- return (0);
-
-fail4:
- EFSYS_PROBE(fail4);
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
#if EFSYS_OPT_SIENA
static const efx_nic_ops_t __efx_nic_siena_ops = {
@@ -637,9 +541,9 @@ efx_nic_get_fw_version(
goto fail2;
rc = efx_mcdi_get_capabilities(enp, NULL,
- &enfip->enfi_rx_dpcpu_fw_id,
- &enfip->enfi_tx_dpcpu_fw_id,
- NULL, NULL);
+ &enfip->enfi_rx_dpcpu_fw_id,
+ &enfip->enfi_tx_dpcpu_fw_id,
+ NULL, NULL);
if (rc == 0) {
enfip->enfi_dpcpu_fw_ids_valid = B_TRUE;
} else if (rc == ENOTSUP) {
@@ -650,7 +554,8 @@ efx_nic_get_fw_version(
goto fail3;
}
- memcpy(enfip->enfi_mc_fw_version, mc_fw_version, sizeof(mc_fw_version));
+ memcpy(enfip->enfi_mc_fw_version, mc_fw_version,
+ sizeof (mc_fw_version));
return (0);
@@ -688,139 +593,6 @@ fail1:
return (rc);
}
- __checkReturn efx_rc_t
-efx_nic_test_registers(
- __in efx_nic_t *enp,
- __in efx_register_set_t *rsp,
- __in size_t count)
-{
- unsigned int bit;
- efx_oword_t original;
- efx_oword_t reg;
- efx_oword_t buf;
- efx_rc_t rc;
-
- while (count > 0) {
- /* This function is only suitable for registers */
- EFSYS_ASSERT(rsp->rows == 1);
-
- /* bit sweep on and off */
- EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original,
- B_TRUE);
- for (bit = 0; bit < 128; bit++) {
- /* Is this bit in the mask? */
- if (~(rsp->mask.eo_u32[bit >> 5]) & (1 << bit))
- continue;
-
- /* Test this bit can be set in isolation */
- reg = original;
- EFX_AND_OWORD(reg, rsp->mask);
- EFX_SET_OWORD_BIT(reg, bit);
-
- EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
- B_TRUE);
- EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
- B_TRUE);
-
- EFX_AND_OWORD(buf, rsp->mask);
- if (memcmp(&reg, &buf, sizeof (reg))) {
- rc = EIO;
- goto fail1;
- }
-
- /* Test this bit can be cleared in isolation */
- EFX_OR_OWORD(reg, rsp->mask);
- EFX_CLEAR_OWORD_BIT(reg, bit);
-
- EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
- B_TRUE);
- EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
- B_TRUE);
-
- EFX_AND_OWORD(buf, rsp->mask);
- if (memcmp(&reg, &buf, sizeof (reg))) {
- rc = EIO;
- goto fail2;
- }
- }
-
- /* Restore the old value */
- EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original,
- B_TRUE);
-
- --count;
- ++rsp;
- }
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- /* Restore the old value */
- EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-efx_nic_test_tables(
- __in efx_nic_t *enp,
- __in efx_register_set_t *rsp,
- __in efx_pattern_type_t pattern,
- __in size_t count)
-{
- efx_sram_pattern_fn_t func;
- unsigned int index;
- unsigned int address;
- efx_oword_t reg;
- efx_oword_t buf;
- efx_rc_t rc;
-
- EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES);
- func = __efx_sram_pattern_fns[pattern];
-
- while (count > 0) {
- /* Write */
- address = rsp->address;
- for (index = 0; index < rsp->rows; ++index) {
- func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
- func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
- EFX_AND_OWORD(reg, rsp->mask);
- EFSYS_BAR_WRITEO(enp->en_esbp, address, &reg, B_TRUE);
-
- address += rsp->step;
- }
-
- /* Read */
- address = rsp->address;
- for (index = 0; index < rsp->rows; ++index) {
- func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
- func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
- EFX_AND_OWORD(reg, rsp->mask);
- EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE);
- if (memcmp(&reg, &buf, sizeof (reg))) {
- rc = EIO;
- goto fail1;
- }
-
- address += rsp->step;
- }
-
- ++rsp;
- --count;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
#endif /* EFSYS_OPT_DIAG */
#if EFSYS_OPT_LOOPBACK
diff --git a/drivers/net/sfc/base/efx_nvram.c b/drivers/net/sfc/base/efx_nvram.c
index 6ee2a71d..c2cc9ad3 100644
--- a/drivers/net/sfc/base/efx_nvram.c
+++ b/drivers/net/sfc/base/efx_nvram.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -43,6 +19,7 @@ static const efx_nvram_ops_t __efx_nvram_siena_ops = {
siena_nvram_partn_size, /* envo_partn_size */
siena_nvram_partn_rw_start, /* envo_partn_rw_start */
siena_nvram_partn_read, /* envo_partn_read */
+ siena_nvram_partn_read, /* envo_partn_read_backup */
siena_nvram_partn_erase, /* envo_partn_erase */
siena_nvram_partn_write, /* envo_partn_write */
siena_nvram_partn_rw_finish, /* envo_partn_rw_finish */
@@ -63,6 +40,7 @@ static const efx_nvram_ops_t __efx_nvram_ef10_ops = {
ef10_nvram_partn_size, /* envo_partn_size */
ef10_nvram_partn_rw_start, /* envo_partn_rw_start */
ef10_nvram_partn_read, /* envo_partn_read */
+ ef10_nvram_partn_read_backup, /* envo_partn_read_backup */
ef10_nvram_partn_erase, /* envo_partn_erase */
ef10_nvram_partn_write, /* envo_partn_write */
ef10_nvram_partn_rw_finish, /* envo_partn_rw_finish */
@@ -112,6 +90,8 @@ efx_nvram_init(
enp->en_envop = envop;
enp->en_mod_flags |= EFX_MOD_NVRAM;
+ enp->en_nvram_partn_locked = EFX_NVRAM_PARTN_INVALID;
+
return (0);
fail1:
@@ -158,8 +138,6 @@ efx_nvram_size(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
- EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
-
if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
goto fail1;
@@ -192,8 +170,6 @@ efx_nvram_get_version(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
- EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
-
if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
goto fail1;
@@ -224,18 +200,15 @@ efx_nvram_rw_start(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
- EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
- EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
-
- EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
-
if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
goto fail1;
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, EFX_NVRAM_PARTN_INVALID);
+
if ((rc = envop->envo_partn_rw_start(enp, partn, chunk_sizep)) != 0)
goto fail2;
- enp->en_nvram_locked = type;
+ enp->en_nvram_partn_locked = partn;
return (0);
@@ -262,15 +235,51 @@ efx_nvram_read_chunk(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
- EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
- EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn);
- EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+ if ((rc = envop->envo_partn_read(enp, partn, offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Read from the backup (writeable) store of an A/B partition.
+ * For non A/B partitions, there is only a single store, and so this
+ * function has the same behaviour as efx_nvram_read_chunk().
+ */
+ __checkReturn efx_rc_t
+efx_nvram_read_backup(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
goto fail1;
- if ((rc = envop->envo_partn_read(enp, partn, offset, data, size)) != 0)
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn);
+
+ if ((rc = envop->envo_partn_read_backup(enp, partn, offset,
+ data, size)) != 0)
goto fail2;
return (0);
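
A hedged usage sketch for the new backup read, assuming an update session on
some partition type and caller-managed buffers (data and verify_buf, each of
length size): the writeable store can be read back before the session is
closed.

	size_t chunk_size;

	if ((rc = efx_nvram_rw_start(enp, type, &chunk_size)) == 0) {
		(void) efx_nvram_write_chunk(enp, type, 0, data, size);
		(void) efx_nvram_read_backup(enp, type, 0, verify_buf, size);
		/* Passing NULL discards the verification result */
		rc = efx_nvram_rw_finish(enp, type, NULL);
	}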
@@ -297,14 +306,11 @@ efx_nvram_erase(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
- EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
- EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
-
- EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
-
if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
goto fail1;
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn);
+
if ((rc = envop->envo_partn_size(enp, partn, &size)) != 0)
goto fail2;
@@ -338,14 +344,11 @@ efx_nvram_write_chunk(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
- EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
- EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
-
- EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
-
if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
goto fail1;
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn);
+
if ((rc = envop->envo_partn_write(enp, partn, offset, data, size)) != 0)
goto fail2;
@@ -362,37 +365,43 @@ fail1:
__checkReturn efx_rc_t
efx_nvram_rw_finish(
__in efx_nic_t *enp,
- __in efx_nvram_type_t type)
+ __in efx_nvram_type_t type,
+ __out_opt uint32_t *verify_resultp)
{
const efx_nvram_ops_t *envop = enp->en_envop;
uint32_t partn;
+ uint32_t verify_result = 0;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
- EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
- EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
-
- EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
-
if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
goto fail1;
- if ((rc = envop->envo_partn_rw_finish(enp, partn)) != 0)
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, partn);
+
+ if ((rc = envop->envo_partn_rw_finish(enp, partn, &verify_result)) != 0)
goto fail2;
- enp->en_nvram_locked = EFX_NVRAM_INVALID;
+ enp->en_nvram_partn_locked = EFX_NVRAM_PARTN_INVALID;
+
+ if (verify_resultp != NULL)
+ *verify_resultp = verify_result;
return (0);
fail2:
EFSYS_PROBE(fail2);
- enp->en_nvram_locked = EFX_NVRAM_INVALID;
+ enp->en_nvram_partn_locked = EFX_NVRAM_PARTN_INVALID;
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ /* Always report verification result */
+ if (verify_resultp != NULL)
+ *verify_resultp = verify_result;
+
return (rc);
}
@@ -410,17 +419,15 @@ efx_nvram_set_version(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
- EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
/*
* The Siena implementation of envo_set_version() will attempt to
- * acquire the NVRAM_UPDATE lock for the DYNAMIC_CONFIG sector.
+ * acquire the NVRAM_UPDATE lock for the DYNAMIC_CONFIG partition.
* Therefore, you can't have already acquired the NVRAM_UPDATE lock.
*/
- EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
-
- if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
- goto fail1;
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, EFX_NVRAM_PARTN_INVALID);
if ((rc = envop->envo_partn_set_version(enp, partn, version)) != 0)
goto fail2;
@@ -451,16 +458,14 @@ efx_nvram_validate(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
- EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
-
-
if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
goto fail1;
- if (envop->envo_type_to_partn != NULL &&
- ((rc = envop->envo_buffer_validate(enp, partn,
- partn_data, partn_size)) != 0))
- goto fail2;
+ if (envop->envo_buffer_validate != NULL) {
+ if ((rc = envop->envo_buffer_validate(enp, partn,
+ partn_data, partn_size)) != 0)
+ goto fail2;
+ }
return (0);
@@ -481,7 +486,7 @@ efx_nvram_fini(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
- EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+ EFSYS_ASSERT3U(enp->en_nvram_partn_locked, ==, EFX_NVRAM_PARTN_INVALID);
enp->en_envop = NULL;
enp->en_mod_flags &= ~EFX_MOD_NVRAM;
@@ -579,7 +584,7 @@ efx_mcdi_nvram_metadata(
MCDI_IN_SET_DWORD(req, NVRAM_METADATA_IN_TYPE, partn);
- efx_mcdi_execute(enp, &req);
+ efx_mcdi_execute_quiet(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
@@ -915,13 +920,13 @@ efx_mcdi_nvram_update_finish(
__in efx_nic_t *enp,
__in uint32_t partn,
__in boolean_t reboot,
- __out_opt uint32_t *resultp)
+ __out_opt uint32_t *verify_resultp)
{
const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN,
MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN)];
- uint32_t result = 0; /* FIXME: use MC_CMD_NVRAM_VERIFY_RC_UNKNOWN */
+ uint32_t verify_result = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
efx_rc_t rc;
(void) memset(payload, 0, sizeof (payload));
@@ -944,28 +949,27 @@ efx_mcdi_nvram_update_finish(
goto fail1;
}
- if (encp->enc_fw_verified_nvram_update_required == B_FALSE) {
- /* Report success if verified updates are not supported. */
- result = MC_CMD_NVRAM_VERIFY_RC_SUCCESS;
- } else {
- /* Firmware-verified NVRAM updates are required */
- if (req.emr_out_length_used <
- MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
+ if (req.emr_out_length_used < MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
+ verify_result = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
+ if (encp->enc_nvram_update_verify_result_supported) {
+ /* Result of update verification is missing */
rc = EMSGSIZE;
goto fail2;
}
- result =
+ } else {
+ verify_result =
MCDI_OUT_DWORD(req, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
+ }
- if (result != MC_CMD_NVRAM_VERIFY_RC_SUCCESS) {
- /* Mandatory verification failed */
- rc = EINVAL;
- goto fail3;
- }
+ if ((encp->enc_nvram_update_verify_result_supported) &&
+ (verify_result != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)) {
+ /* Update verification failed */
+ rc = EINVAL;
+ goto fail3;
}
- if (resultp != NULL)
- *resultp = result;
+ if (verify_resultp != NULL)
+ *verify_resultp = verify_result;
return (0);
@@ -977,8 +981,8 @@ fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Always report verification result */
- if (resultp != NULL)
- *resultp = result;
+ if (verify_resultp != NULL)
+ *verify_resultp = verify_result;
return (rc);
}
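
With the reworked update finish above, the verification result is reported
unconditionally, defaulting to MC_CMD_NVRAM_VERIFY_RC_UNKNOWN when the MC
returns no result code. A sketch of a caller that wants the result even on
failure (the partition type is illustrative):

	uint32_t verify_result;

	rc = efx_nvram_rw_finish(enp, EFX_NVRAM_DYNAMIC_CFG,
	    &verify_result);
	if (rc != 0) {
		/* verify_result is still populated at this point */
	}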
diff --git a/drivers/net/sfc/base/efx_phy.c b/drivers/net/sfc/base/efx_phy.c
index 752cd52e..069c2836 100644
--- a/drivers/net/sfc/base/efx_phy.c
+++ b/drivers/net/sfc/base/efx_phy.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -296,7 +272,7 @@ efx_phy_media_type_get(
*typep = epp->ep_fixed_port_type;
}
- __checkReturn efx_rc_t
+ __checkReturn efx_rc_t
efx_phy_module_get_info(
__in efx_nic_t *enp,
__in uint8_t dev_addr,
diff --git a/drivers/net/sfc/base/efx_phy_ids.h b/drivers/net/sfc/base/efx_phy_ids.h
index 9d9a0f90..9fb1c6c4 100644
--- a/drivers/net/sfc/base/efx_phy_ids.h
+++ b/drivers/net/sfc/base/efx_phy_ids.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2013-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2013-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_EFX_PHY_IDS_H
diff --git a/drivers/net/sfc/base/efx_port.c b/drivers/net/sfc/base/efx_port.c
index 518c2a22..a792a9ef 100644
--- a/drivers/net/sfc/base/efx_port.c
+++ b/drivers/net/sfc/base/efx_port.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -109,7 +85,6 @@ efx_port_poll(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
EFSYS_ASSERT(emop != NULL);
- EFSYS_ASSERT(!epp->ep_mac_stats_pending);
if (link_modep == NULL)
link_modep = &ignore_link_mode;
diff --git a/drivers/net/sfc/base/efx_regs.h b/drivers/net/sfc/base/efx_regs.h
index a1a7f9da..aef212e7 100644
--- a/drivers/net/sfc/base/efx_regs.h
+++ b/drivers/net/sfc/base/efx_regs.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_EFX_REGS_H
diff --git a/drivers/net/sfc/base/efx_regs_ef10.h b/drivers/net/sfc/base/efx_regs_ef10.h
index 11a91848..5f978305 100644
--- a/drivers/net/sfc/base/efx_regs_ef10.h
+++ b/drivers/net/sfc/base/efx_regs_ef10.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_EFX_EF10_REGS_H
diff --git a/drivers/net/sfc/base/efx_regs_mcdi.h b/drivers/net/sfc/base/efx_regs_mcdi.h
index 66896fbb..7389877a 100644
--- a/drivers/net/sfc/base/efx_regs_mcdi.h
+++ b/drivers/net/sfc/base/efx_regs_mcdi.h
@@ -1,26 +1,7 @@
-/*-
- * Copyright 2008-2013 Solarflare Communications Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * Copyright 2008-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
/*! \cidoxg_firmware_mc_cmd */
diff --git a/drivers/net/sfc/base/efx_regs_pci.h b/drivers/net/sfc/base/efx_regs_pci.h
index f90f9565..29f51385 100644
--- a/drivers/net/sfc/base/efx_regs_pci.h
+++ b/drivers/net/sfc/base/efx_regs_pci.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_EFX_REGS_PCI_H
diff --git a/drivers/net/sfc/base/efx_rx.c b/drivers/net/sfc/base/efx_rx.c
index 785365d3..c0dcb752 100644
--- a/drivers/net/sfc/base/efx_rx.c
+++ b/drivers/net/sfc/base/efx_rx.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -86,14 +62,14 @@ siena_rx_prefix_pktlen(
__in uint8_t *buffer,
__out uint16_t *lengthp);
-static void
+static void
siena_rx_qpost(
- __in efx_rxq_t *erp,
- __in_ecount(n) efsys_dma_addr_t *addrp,
- __in size_t size,
- __in unsigned int n,
- __in unsigned int completed,
- __in unsigned int added);
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added);
static void
siena_rx_qpush(
@@ -103,7 +79,7 @@ siena_rx_qpush(
#if EFSYS_OPT_RX_PACKED_STREAM
static void
-siena_rx_qps_update_credits(
+siena_rx_qpush_ps_credits(
__in efx_rxq_t *erp);
static __checkReturn uint8_t *
@@ -131,9 +107,11 @@ siena_rx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
+ __in uint32_t type_data,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
+ __in unsigned int flags,
__in efx_evq_t *eep,
__in efx_rxq_t *erp);
@@ -163,7 +141,7 @@ static const efx_rx_ops_t __efx_rx_siena_ops = {
siena_rx_qpost, /* erxo_qpost */
siena_rx_qpush, /* erxo_qpush */
#if EFSYS_OPT_RX_PACKED_STREAM
- siena_rx_qps_update_credits, /* erxo_qps_update_credits */
+ siena_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */
siena_rx_qps_packet_info, /* erxo_qps_packet_info */
#endif
siena_rx_qflush, /* erxo_qflush */
@@ -192,7 +170,7 @@ static const efx_rx_ops_t __efx_rx_ef10_ops = {
ef10_rx_qpost, /* erxo_qpost */
ef10_rx_qpush, /* erxo_qpush */
#if EFSYS_OPT_RX_PACKED_STREAM
- ef10_rx_qps_update_credits, /* erxo_qps_update_credits */
+ ef10_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */
ef10_rx_qps_packet_info, /* erxo_qps_packet_info */
#endif
ef10_rx_qflush, /* erxo_qflush */
@@ -512,27 +490,27 @@ fail1:
}
#endif /* EFSYS_OPT_RX_SCALE */
- void
+ void
efx_rx_qpost(
- __in efx_rxq_t *erp,
- __in_ecount(n) efsys_dma_addr_t *addrp,
- __in size_t size,
- __in unsigned int n,
- __in unsigned int completed,
- __in unsigned int added)
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added)
{
efx_nic_t *enp = erp->er_enp;
const efx_rx_ops_t *erxop = enp->en_erxop;
EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
- erxop->erxo_qpost(erp, addrp, size, n, completed, added);
+ erxop->erxo_qpost(erp, addrp, size, ndescs, completed, added);
}
#if EFSYS_OPT_RX_PACKED_STREAM
void
-efx_rx_qps_update_credits(
+efx_rx_qpush_ps_credits(
__in efx_rxq_t *erp)
{
efx_nic_t *enp = erp->er_enp;
@@ -540,7 +518,7 @@ efx_rx_qps_update_credits(
EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
- erxop->erxo_qps_update_credits(erp);
+ erxop->erxo_qpush_ps_credits(erp);
}
__checkReturn uint8_t *
@@ -610,15 +588,17 @@ efx_rx_qenable(
erxop->erxo_qenable(erp);
}
- __checkReturn efx_rc_t
-efx_rx_qcreate(
+static __checkReturn efx_rc_t
+efx_rx_qcreate_internal(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
+ __in uint32_t type_data,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
+ __in unsigned int flags,
__in efx_evq_t *eep,
__deref_out efx_rxq_t **erpp)
{
@@ -640,11 +620,11 @@ efx_rx_qcreate(
erp->er_magic = EFX_RXQ_MAGIC;
erp->er_enp = enp;
erp->er_index = index;
- erp->er_mask = n - 1;
+ erp->er_mask = ndescs - 1;
erp->er_esmp = esmp;
- if ((rc = erxop->erxo_qcreate(enp, index, label, type, esmp, n, id,
- eep, erp)) != 0)
+ if ((rc = erxop->erxo_qcreate(enp, index, label, type, type_data, esmp,
+ ndescs, id, flags, eep, erp)) != 0)
goto fail2;
enp->en_rx_qcount++;
@@ -662,6 +642,43 @@ fail1:
return (rc);
}
+ __checkReturn efx_rc_t
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in uint32_t id,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ return efx_rx_qcreate_internal(enp, index, label, type, 0, esmp, ndescs,
+ id, flags, eep, erpp);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+ __checkReturn efx_rc_t
+efx_rx_qcreate_packed_stream(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in uint32_t ps_buf_size,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ return efx_rx_qcreate_internal(enp, index, label,
+ EFX_RXQ_TYPE_PACKED_STREAM, ps_buf_size, esmp, ndescs,
+ 0 /* id unused on EF10 */, EFX_RXQ_FLAG_NONE, eep, erpp);
+}
+
+#endif
+
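Scatter support moves from a dedicated queue type to a creation flag: the
EFX_RXQ_TYPE_SCATTER case is deleted from siena_rx_qcreate further down, and
callers pass EFX_RXQ_FLAG_SCATTER instead. A hedged sketch of the updated
call, with enp/index/label/esmp/ndescs/id/eep assumed from the caller's
context:

	rc = efx_rx_qcreate(enp, index, label, EFX_RXQ_TYPE_DEFAULT,
	    esmp, ndescs, id, EFX_RXQ_FLAG_SCATTER, eep, &erp);
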
void
efx_rx_qdestroy(
__in efx_rxq_t *erp)
@@ -1163,14 +1180,14 @@ siena_rx_prefix_pktlen(
}
-static void
+static void
siena_rx_qpost(
- __in efx_rxq_t *erp,
- __in_ecount(n) efsys_dma_addr_t *addrp,
- __in size_t size,
- __in unsigned int n,
- __in unsigned int completed,
- __in unsigned int added)
+ __in efx_rxq_t *erp,
+ __in_ecount(ndescs) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __in unsigned int added)
{
efx_qword_t qword;
unsigned int i;
@@ -1178,11 +1195,11 @@ siena_rx_qpost(
unsigned int id;
/* The client driver must not overfill the queue */
- EFSYS_ASSERT3U(added - completed + n, <=,
+ EFSYS_ASSERT3U(added - completed + ndescs, <=,
EFX_RXQ_LIMIT(erp->er_mask + 1));
id = added & (erp->er_mask);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < ndescs; i++) {
EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
unsigned int, id, efsys_dma_addr_t, addrp[i],
size_t, size);
@@ -1235,7 +1252,7 @@ siena_rx_qpush(
#if EFSYS_OPT_RX_PACKED_STREAM
static void
-siena_rx_qps_update_credits(
+siena_rx_qpush_ps_credits(
__in efx_rxq_t *erp)
{
/* Not supported by Siena hardware */
@@ -1303,19 +1320,22 @@ siena_rx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
+ __in uint32_t type_data,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
+ __in unsigned int flags,
__in efx_evq_t *eep,
__in efx_rxq_t *erp)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_oword_t oword;
uint32_t size;
- boolean_t jumbo;
+ boolean_t jumbo = B_FALSE;
efx_rc_t rc;
_NOTE(ARGUNUSED(esmp))
+ _NOTE(ARGUNUSED(type_data))
EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS ==
(1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH));
@@ -1325,7 +1345,8 @@ siena_rx_qcreate(
EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
- if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
+ if (!ISP2(ndescs) ||
+ (ndescs < EFX_RXQ_MINNDESCS) || (ndescs > EFX_RXQ_MAXNDESCS)) {
rc = EINVAL;
goto fail1;
}
@@ -1335,7 +1356,7 @@ siena_rx_qcreate(
}
for (size = 0; (1 << size) <= (EFX_RXQ_MAXNDESCS / EFX_RXQ_MINNDESCS);
size++)
- if ((1 << size) == (int)(n / EFX_RXQ_MINNDESCS))
+ if ((1 << size) == (int)(ndescs / EFX_RXQ_MINNDESCS))
break;
if (id + (1 << size) >= encp->enc_buftbl_limit) {
rc = EINVAL;
@@ -1344,24 +1365,22 @@ siena_rx_qcreate(
switch (type) {
case EFX_RXQ_TYPE_DEFAULT:
- jumbo = B_FALSE;
break;
-#if EFSYS_OPT_RX_SCATTER
- case EFX_RXQ_TYPE_SCATTER:
- if (enp->en_family < EFX_FAMILY_SIENA) {
- rc = EINVAL;
- goto fail4;
- }
- jumbo = B_TRUE;
- break;
-#endif /* EFSYS_OPT_RX_SCATTER */
-
default:
rc = EINVAL;
goto fail4;
}
+ if (flags & EFX_RXQ_FLAG_SCATTER) {
+#if EFSYS_OPT_RX_SCATTER
+ jumbo = B_TRUE;
+#else
+ rc = EINVAL;
+ goto fail5;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ }
+
/* Set up the new descriptor queue */
EFX_POPULATE_OWORD_7(oword,
FRF_AZ_RX_DESCQ_BUF_BASE_ID, id,
@@ -1377,6 +1396,10 @@ siena_rx_qcreate(
return (0);
+#if !EFSYS_OPT_RX_SCATTER
+fail5:
+ EFSYS_PROBE(fail5);
+#endif
fail4:
EFSYS_PROBE(fail4);
fail3:
diff --git a/drivers/net/sfc/base/efx_sram.c b/drivers/net/sfc/base/efx_sram.c
index 5f4edea7..1f0ba0a9 100644
--- a/drivers/net/sfc/base/efx_sram.c
+++ b/drivers/net/sfc/base/efx_sram.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/efx_tunnel.c b/drivers/net/sfc/base/efx_tunnel.c
new file mode 100644
index 00000000..25fa976f
--- /dev/null
+++ b/drivers/net/sfc/base/efx_tunnel.c
@@ -0,0 +1,463 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_TUNNEL
+
+#if EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON
+static const efx_tunnel_ops_t __efx_tunnel_dummy_ops = {
+ NULL, /* eto_udp_encap_supported */
+ NULL, /* eto_reconfigure */
+};
+#endif /* EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+static __checkReturn boolean_t
+medford_udp_encap_supported(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+medford_tunnel_reconfigure(
+ __in efx_nic_t *enp);
+
+static const efx_tunnel_ops_t __efx_tunnel_medford_ops = {
+ medford_udp_encap_supported, /* eto_udp_encap_supported */
+ medford_tunnel_reconfigure, /* eto_reconfigure */
+};
+#endif /* EFSYS_OPT_MEDFORD */
+
+static __checkReturn efx_rc_t
+efx_mcdi_set_tunnel_encap_udp_ports(
+ __in efx_nic_t *enp,
+ __in efx_tunnel_cfg_t *etcp,
+ __in boolean_t unloading,
+ __out boolean_t *resetting)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX,
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN)];
+ efx_word_t flags;
+ efx_rc_t rc;
+ unsigned int i;
+ unsigned int entries_num;
+
+ if (etcp == NULL)
+ entries_num = 0;
+ else
+ entries_num = etcp->etc_udp_entries_num;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS;
+ req.emr_in_buf = payload;
+ req.emr_in_length =
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(entries_num);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN;
+
+ EFX_POPULATE_WORD_1(flags,
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
+ (unloading == B_TRUE) ? 1 : 0);
+ MCDI_IN_SET_WORD(req, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS,
+ EFX_WORD_FIELD(flags, EFX_WORD_0));
+
+ MCDI_IN_SET_WORD(req, SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES,
+ entries_num);
+
+ for (i = 0; i < entries_num; ++i) {
+ uint16_t mcdi_udp_protocol;
+
+ switch (etcp->etc_udp_entries[i].etue_protocol) {
+ case EFX_TUNNEL_PROTOCOL_VXLAN:
+ mcdi_udp_protocol = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
+ break;
+ case EFX_TUNNEL_PROTOCOL_GENEVE:
+ mcdi_udp_protocol = TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * UDP port is MCDI native little-endian in the request
+ * and EFX_POPULATE_DWORD cares about conversion from
+ * host/CPU byte order to little-endian.
+ */
+ EFX_STATIC_ASSERT(sizeof (efx_dword_t) ==
+ TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN);
+ EFX_POPULATE_DWORD_2(
+ MCDI_IN2(req, efx_dword_t,
+ SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES)[i],
+ TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
+ etcp->etc_udp_entries[i].etue_port,
+ TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
+ mcdi_udp_protocol);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used !=
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *resetting = MCDI_OUT_WORD_FIELD(req,
+ SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS,
+ SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tunnel_init(
+ __in efx_nic_t *enp)
+{
+ efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
+ const efx_tunnel_ops_t *etop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TUNNEL));
+
+ EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
+ MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ etop = &__efx_tunnel_dummy_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ etop = &__efx_tunnel_dummy_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ etop = &__efx_tunnel_medford_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ memset(etcp->etc_udp_entries, 0, sizeof (etcp->etc_udp_entries));
+ etcp->etc_udp_entries_num = 0;
+
+ enp->en_etop = etop;
+ enp->en_mod_flags |= EFX_MOD_TUNNEL;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_etop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TUNNEL;
+
+ return (rc);
+}
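+
+/*
+ * Illustrative call sequence (a sketch for orientation only, not part
+ * of this patch): after efx_nic_probe(), a driver would typically do
+ *
+ *	efx_tunnel_init(enp);
+ *	efx_tunnel_config_udp_add(enp, 4789, EFX_TUNNEL_PROTOCOL_VXLAN);
+ *	rc = efx_tunnel_reconfigure(enp);  (EAGAIN: MC reboot pending)
+ *	...
+ *	efx_tunnel_fini(enp);
+ *
+ * 4789 is the IANA-assigned VXLAN UDP port; ports are passed in
+ * host/CPU byte order (see efx_tunnel_config_udp_add below).
+ */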
+
+ void
+efx_tunnel_fini(
+ __in efx_nic_t *enp)
+{
+ boolean_t resetting;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL);
+
+ if ((enp->en_etop->eto_udp_encap_supported != NULL) &&
+ enp->en_etop->eto_udp_encap_supported(enp)) {
+ /*
+		 * The UNLOADING flag allows the MC to suppress the
+		 * datapath reset if the flag was set on the last call to
+		 * MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS by every PCI function.
+ */
+ (void) efx_mcdi_set_tunnel_encap_udp_ports(enp, NULL, B_TRUE,
+ &resetting);
+ }
+
+ enp->en_etop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TUNNEL;
+}
+
+static __checkReturn efx_rc_t
+efx_tunnel_config_find_udp_tunnel_entry(
+ __in efx_tunnel_cfg_t *etcp,
+ __in uint16_t port,
+ __out unsigned int *entryp)
+{
+ unsigned int i;
+
+ for (i = 0; i < etcp->etc_udp_entries_num; ++i) {
+ efx_tunnel_udp_entry_t *p = &etcp->etc_udp_entries[i];
+
+ if (p->etue_port == port) {
+ *entryp = i;
+ return (0);
+ }
+ }
+
+ return (ENOENT);
+}
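+
+/*
+ * A linear search is sufficient here: the table is small, since at
+ * most EFX_TUNNEL_MAXNENTRIES entries fit in one MCDI request (see
+ * the static assert in efx_tunnel_init above).
+ */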
+
+ __checkReturn efx_rc_t
+efx_tunnel_config_udp_add(
+ __in efx_nic_t *enp,
+ __in uint16_t port /* host/cpu-endian */,
+ __in efx_tunnel_protocol_t protocol)
+{
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
+ efsys_lock_state_t state;
+ efx_rc_t rc;
+ unsigned int entry;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL);
+
+ if (protocol >= EFX_TUNNEL_NPROTOS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((encp->enc_tunnel_encapsulations_supported &
+ (1u << protocol)) == 0) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = efx_tunnel_config_find_udp_tunnel_entry(etcp, port, &entry);
+ if (rc == 0) {
+ rc = EEXIST;
+ goto fail3;
+ }
+
+ if (etcp->etc_udp_entries_num ==
+ encp->enc_tunnel_config_udp_entries_max) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ etcp->etc_udp_entries[etcp->etc_udp_entries_num].etue_port = port;
+ etcp->etc_udp_entries[etcp->etc_udp_entries_num].etue_protocol =
+ protocol;
+
+ etcp->etc_udp_entries_num++;
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_PROBE(fail3);
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
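+
+/*
+ * Note that efx_tunnel_config_udp_add()/_remove() only update the
+ * software configuration under the NIC lock; nothing is sent to the
+ * MC until efx_tunnel_reconfigure() is called.
+ */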
+
+ __checkReturn efx_rc_t
+efx_tunnel_config_udp_remove(
+ __in efx_nic_t *enp,
+ __in uint16_t port /* host/cpu-endian */,
+ __in efx_tunnel_protocol_t protocol)
+{
+ efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
+ efsys_lock_state_t state;
+ unsigned int entry;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = efx_tunnel_config_find_udp_tunnel_entry(etcp, port, &entry);
+ if (rc != 0)
+ goto fail1;
+
+ if (etcp->etc_udp_entries[entry].etue_protocol != protocol) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFSYS_ASSERT3U(etcp->etc_udp_entries_num, >, 0);
+ etcp->etc_udp_entries_num--;
+
+ if (entry < etcp->etc_udp_entries_num) {
+ memmove(&etcp->etc_udp_entries[entry],
+ &etcp->etc_udp_entries[entry + 1],
+ (etcp->etc_udp_entries_num - entry) *
+ sizeof (etcp->etc_udp_entries[0]));
+ }
+
+ memset(&etcp->etc_udp_entries[etcp->etc_udp_entries_num], 0,
+ sizeof (etcp->etc_udp_entries[0]));
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+ void
+efx_tunnel_config_clear(
+ __in efx_nic_t *enp)
+{
+ efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ etcp->etc_udp_entries_num = 0;
+ memset(etcp->etc_udp_entries, 0, sizeof (etcp->etc_udp_entries));
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+ __checkReturn efx_rc_t
+efx_tunnel_reconfigure(
+ __in efx_nic_t *enp)
+{
+ const efx_tunnel_ops_t *etop = enp->en_etop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TUNNEL);
+
+ if (etop->eto_reconfigure == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = enp->en_etop->eto_reconfigure(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
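+
+/*
+ * efx_tunnel_reconfigure() dispatches through the per-family tunnel
+ * ops: the dummy ops (Siena and Huntington) leave eto_reconfigure
+ * NULL and therefore fail with ENOTSUP above, while Medford uses
+ * medford_tunnel_reconfigure() below.
+ */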
+
+#if EFSYS_OPT_MEDFORD
+static __checkReturn boolean_t
+medford_udp_encap_supported(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint32_t udp_tunnels_mask = 0;
+
+ udp_tunnels_mask |= (1u << EFX_TUNNEL_PROTOCOL_VXLAN);
+ udp_tunnels_mask |= (1u << EFX_TUNNEL_PROTOCOL_GENEVE);
+
+ return ((encp->enc_tunnel_encapsulations_supported &
+ udp_tunnels_mask) == 0 ? B_FALSE : B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+medford_tunnel_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
+ efx_rc_t rc;
+ boolean_t resetting;
+ efsys_lock_state_t state;
+ efx_tunnel_cfg_t etc;
+
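+	/*
+	 * Take a snapshot of the configuration under the lock so that
+	 * the MCDI request below can be issued without holding the
+	 * NIC lock.
+	 */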
+ EFSYS_LOCK(enp->en_eslp, state);
+ memcpy(&etc, etcp, sizeof (etc));
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (medford_udp_encap_supported(enp) == B_FALSE) {
+ /*
+		 * Applying an empty set of UDP tunnel ports when UDP
+		 * tunnel encapsulations are not supported is a no-op,
+		 * so there is nothing to do.
+ */
+ if (etc.etc_udp_entries_num == 0)
+ return (0);
+ rc = ENOTSUP;
+ goto fail1;
+ } else {
+ /*
+		 * All PCI functions may observe a datapath reset when
+		 * the MCDI request completes.
+ */
+ rc = efx_mcdi_set_tunnel_encap_udp_ports(enp, &etc, B_FALSE,
+ &resetting);
+ if (rc != 0)
+ goto fail2;
+
+ /*
+		 * Although the caller should be able to handle an MC
+		 * reboot, it is useful to report the impending reboot
+		 * by returning EAGAIN.
+ */
+ return ((resetting) ? EAGAIN : 0);
+ }
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_MEDFORD */
+
+#endif /* EFSYS_OPT_TUNNEL */
diff --git a/drivers/net/sfc/base/efx_tx.c b/drivers/net/sfc/base/efx_tx.c
index ceb29206..4e02c869 100644
--- a/drivers/net/sfc/base/efx_tx.c
+++ b/drivers/net/sfc/base/efx_tx.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -57,7 +33,7 @@ siena_tx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
__in uint16_t flags,
__in efx_evq_t *eep,
@@ -68,13 +44,13 @@ static void
siena_tx_qdestroy(
__in efx_txq_t *etp);
-static __checkReturn efx_rc_t
+static __checkReturn efx_rc_t
siena_tx_qpost(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_buffer_t *eb,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp);
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *eb,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
static void
siena_tx_qpush(
@@ -95,13 +71,13 @@ static void
siena_tx_qenable(
__in efx_txq_t *etp);
- __checkReturn efx_rc_t
+ __checkReturn efx_rc_t
siena_tx_qdesc_post(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_desc_t *ed,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp);
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_desc_t *ed,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
void
siena_tx_qdesc_dma_create(
@@ -290,7 +266,7 @@ efx_tx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
__in uint16_t flags,
__in efx_evq_t *eep,
@@ -298,14 +274,14 @@ efx_tx_qcreate(
__out unsigned int *addedp)
{
const efx_tx_ops_t *etxop = enp->en_etxop;
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_txq_t *etp;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
- EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, encp->enc_txq_limit);
+ EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <,
+ enp->en_nic_cfg.enc_txq_limit);
/* Allocate an TXQ object */
EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp);
@@ -318,14 +294,14 @@ efx_tx_qcreate(
etp->et_magic = EFX_TXQ_MAGIC;
etp->et_enp = enp;
etp->et_index = index;
- etp->et_mask = n - 1;
+ etp->et_mask = ndescs - 1;
etp->et_esmp = esmp;
/* Initial descriptor index may be modified by etxo_qcreate */
*addedp = 0;
if ((rc = etxop->etxo_qcreate(enp, index, label, esmp,
- n, id, flags, eep, etp, addedp)) != 0)
+ ndescs, id, flags, eep, etp, addedp)) != 0)
goto fail2;
enp->en_tx_qcount++;
@@ -359,13 +335,13 @@ efx_tx_qdestroy(
EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
}
- __checkReturn efx_rc_t
+ __checkReturn efx_rc_t
efx_tx_qpost(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_buffer_t *eb,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp)
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *eb,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
{
efx_nic_t *enp = etp->et_enp;
const efx_tx_ops_t *etxop = enp->en_etxop;
@@ -373,8 +349,7 @@ efx_tx_qpost(
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
- if ((rc = etxop->etxo_qpost(etp, eb,
- n, completed, addedp)) != 0)
+ if ((rc = etxop->etxo_qpost(etp, eb, ndescs, completed, addedp)) != 0)
goto fail1;
return (0);
@@ -550,13 +525,13 @@ fail1:
return (rc);
}
- __checkReturn efx_rc_t
+ __checkReturn efx_rc_t
efx_tx_qdesc_post(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_desc_t *ed,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp)
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_desc_t *ed,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
{
efx_nic_t *enp = etp->et_enp;
const efx_tx_ops_t *etxop = enp->en_etxop;
@@ -565,7 +540,7 @@ efx_tx_qdesc_post(
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
if ((rc = etxop->etxo_qdesc_post(etp, ed,
- n, completed, addedp)) != 0)
+ ndescs, completed, addedp)) != 0)
goto fail1;
return (0);
@@ -724,22 +699,22 @@ siena_tx_init(
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
-static __checkReturn efx_rc_t
+static __checkReturn efx_rc_t
siena_tx_qpost(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_buffer_t *eb,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp)
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_buffer_t *eb,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
{
unsigned int added = *addedp;
unsigned int i;
int rc = ENOSPC;
- if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1))
+ if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1))
goto fail1;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < ndescs; i++) {
efx_buffer_t *ebp = &eb[i];
efsys_dma_addr_t start = ebp->eb_addr;
size_t size = ebp->eb_size;
@@ -889,7 +864,7 @@ siena_tx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efsys_mem_t *esmp,
- __in size_t n,
+ __in size_t ndescs,
__in uint32_t id,
__in uint16_t flags,
__in efx_evq_t *eep,
@@ -899,6 +874,7 @@ siena_tx_qcreate(
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_oword_t oword;
uint32_t size;
+ uint16_t inner_csum;
efx_rc_t rc;
_NOTE(ARGUNUSED(esmp))
@@ -910,7 +886,8 @@ siena_tx_qcreate(
EFSYS_ASSERT(ISP2(encp->enc_txq_max_ndescs));
EFX_STATIC_ASSERT(ISP2(EFX_TXQ_MINNDESCS));
- if (!ISP2(n) || (n < EFX_TXQ_MINNDESCS) || (n > EFX_EVQ_MAXNEVS)) {
+ if (!ISP2(ndescs) ||
+ (ndescs < EFX_TXQ_MINNDESCS) || (ndescs > EFX_EVQ_MAXNEVS)) {
rc = EINVAL;
goto fail1;
}
@@ -921,13 +898,19 @@ siena_tx_qcreate(
for (size = 0;
(1 << size) <= (int)(encp->enc_txq_max_ndescs / EFX_TXQ_MINNDESCS);
size++)
- if ((1 << size) == (int)(n / EFX_TXQ_MINNDESCS))
+ if ((1 << size) == (int)(ndescs / EFX_TXQ_MINNDESCS))
break;
if (id + (1 << size) >= encp->enc_buftbl_limit) {
rc = EINVAL;
goto fail3;
}
+ inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP;
+ if ((flags & inner_csum) != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
/* Set up the new descriptor queue */
*addedp = 0;
@@ -950,6 +933,8 @@ siena_tx_qcreate(
return (0);
+fail4:
+ EFSYS_PROBE(fail4);
fail3:
EFSYS_PROBE(fail3);
fail2:
@@ -960,24 +945,24 @@ fail1:
return (rc);
}
- __checkReturn efx_rc_t
+ __checkReturn efx_rc_t
siena_tx_qdesc_post(
- __in efx_txq_t *etp,
- __in_ecount(n) efx_desc_t *ed,
- __in unsigned int n,
- __in unsigned int completed,
- __inout unsigned int *addedp)
+ __in efx_txq_t *etp,
+ __in_ecount(ndescs) efx_desc_t *ed,
+ __in unsigned int ndescs,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
{
unsigned int added = *addedp;
unsigned int i;
efx_rc_t rc;
- if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
rc = ENOSPC;
goto fail1;
}
- for (i = 0; i < n; i++) {
+ for (i = 0; i < ndescs; i++) {
efx_desc_t *edp = &ed[i];
unsigned int id;
size_t offset;
@@ -989,7 +974,7 @@ siena_tx_qdesc_post(
}
EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
- unsigned int, added, unsigned int, n);
+ unsigned int, added, unsigned int, ndescs);
EFX_TX_QSTAT_INCR(etp, TX_POST);
diff --git a/drivers/net/sfc/base/efx_types.h b/drivers/net/sfc/base/efx_types.h
index b8ee14a6..0581f67f 100644
--- a/drivers/net/sfc/base/efx_types.h
+++ b/drivers/net/sfc/base/efx_types.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*
* Ackowledgement to Fen Systems Ltd.
*/
@@ -116,7 +92,8 @@ extern "C" {
#define EFX_DWORD_3_LBN 96
#define EFX_DWORD_3_WIDTH 32
-/* There are intentionally no EFX_QWORD_0 or EFX_QWORD_1 field definitions
+/*
+ * There are intentionally no EFX_QWORD_0 or EFX_QWORD_1 field definitions
 * here as the implementation of EFX_QWORD_FIELD and EFX_OWORD_FIELD does not
* support field widths larger than 32 bits.
*/
diff --git a/drivers/net/sfc/base/efx_vpd.c b/drivers/net/sfc/base/efx_vpd.c
index 1e47df2c..7b8138f3 100644
--- a/drivers/net/sfc/base/efx_vpd.c
+++ b/drivers/net/sfc/base/efx_vpd.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -928,7 +904,7 @@ efx_vpd_hunk_set(
}
/* Modify tag length (large resource type) */
- taglen += (dest - source);
+ taglen += (uint16_t)(dest - source);
EFX_POPULATE_WORD_1(word, EFX_WORD_0, taglen);
data[offset - 2] = EFX_WORD_FIELD(word, EFX_BYTE_0);
data[offset - 1] = EFX_WORD_FIELD(word, EFX_BYTE_1);
diff --git a/drivers/net/sfc/base/hunt_impl.h b/drivers/net/sfc/base/hunt_impl.h
index 0e0c870f..d8dddce8 100644
--- a/drivers/net/sfc/base/hunt_impl.h
+++ b/drivers/net/sfc/base/hunt_impl.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_HUNT_IMPL_H
diff --git a/drivers/net/sfc/base/hunt_nic.c b/drivers/net/sfc/base/hunt_nic.c
index 19fb7cfb..d03cc138 100644
--- a/drivers/net/sfc/base/hunt_nic.c
+++ b/drivers/net/sfc/base/hunt_nic.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/mcdi_mon.c b/drivers/net/sfc/base/mcdi_mon.c
index c5360c31..e4de0dab 100644
--- a/drivers/net/sfc/base/mcdi_mon.c
+++ b/drivers/net/sfc/base/mcdi_mon.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -157,6 +133,8 @@ static const struct mcdi_sensor_map_s {
STAT(Px, CONTROLLER_TDIODE_TEMP), /* 0x4e CONTROLLER_TDIODE_TEMP */
STAT(Px, BOARD_FRONT_TEMP), /* 0x4f BOARD_FRONT_TEMP */
STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */
+ STAT(Px, I1V8), /* 0x51 IN_I1V8 */
+ STAT(Px, I2V5), /* 0x52 IN_I2V5 */
};
#define MCDI_STATIC_SENSOR_ASSERT(_field) \
@@ -262,7 +240,6 @@ mcdi_mon_ev(
__out efx_mon_stat_value_t *valuep)
{
efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
uint16_t port_mask;
uint16_t sensor;
uint16_t state;
@@ -278,11 +255,13 @@ mcdi_mon_ev(
value = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_VALUE);
/* Hardware must support this MCDI sensor */
- EFSYS_ASSERT3U(sensor, <, (8 * encp->enc_mcdi_sensor_mask_size));
+ EFSYS_ASSERT3U(sensor, <,
+ (8 * enp->en_nic_cfg.enc_mcdi_sensor_mask_size));
EFSYS_ASSERT((sensor % MCDI_MON_PAGE_SIZE) != MC_CMD_SENSOR_PAGE0_NEXT);
- EFSYS_ASSERT(encp->enc_mcdi_sensor_maskp != NULL);
- EFSYS_ASSERT((encp->enc_mcdi_sensor_maskp[sensor / MCDI_MON_PAGE_SIZE] &
- (1U << (sensor % MCDI_MON_PAGE_SIZE))) != 0);
+ EFSYS_ASSERT(enp->en_nic_cfg.enc_mcdi_sensor_maskp != NULL);
+ EFSYS_ASSERT(
+ (enp->en_nic_cfg.enc_mcdi_sensor_maskp[sensor/MCDI_MON_PAGE_SIZE] &
+ (1U << (sensor % MCDI_MON_PAGE_SIZE))) != 0);
/* But we don't have to understand it */
if (sensor >= EFX_ARRAY_SIZE(mcdi_sensor_map)) {
@@ -393,6 +372,11 @@ efx_mcdi_sensor_info(
EFSYS_ASSERT(sensor_maskp != NULL);
+ if (npages < 1) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
for (page = 0; page < npages; page++) {
uint32_t mask;
@@ -409,7 +393,7 @@ efx_mcdi_sensor_info(
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail1;
+ goto fail2;
}
mask = MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK);
@@ -417,18 +401,20 @@ efx_mcdi_sensor_info(
if ((page != (npages - 1)) &&
((mask & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) == 0)) {
rc = EINVAL;
- goto fail2;
+ goto fail3;
}
sensor_maskp[page] = mask;
}
if (sensor_maskp[npages - 1] & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) {
rc = EINVAL;
- goto fail3;
+ goto fail4;
}
return (0);
+fail4:
+ EFSYS_PROBE(fail4);
fail3:
EFSYS_PROBE(fail3);
fail2:
diff --git a/drivers/net/sfc/base/mcdi_mon.h b/drivers/net/sfc/base/mcdi_mon.h
index e07b5280..5aa6a6a2 100644
--- a/drivers/net/sfc/base/mcdi_mon.h
+++ b/drivers/net/sfc/base/mcdi_mon.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_MCDI_MON_H
diff --git a/drivers/net/sfc/base/medford_impl.h b/drivers/net/sfc/base/medford_impl.h
index de2f5cf0..d076afa2 100644
--- a/drivers/net/sfc/base/medford_impl.h
+++ b/drivers/net/sfc/base/medford_impl.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2015-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_MEDFORD_IMPL_H
@@ -35,14 +11,6 @@
extern "C" {
#endif
-/* Alignment requirement for value written to RX WPTR:
- * the WPTR must be aligned to an 8 descriptor boundary
- *
- * FIXME: Is this the same on Medford as Huntington?
- */
-#define MEDFORD_RX_WPTR_ALIGN 8
-
-
#ifndef ER_EZ_TX_PIOBUF_SIZE
#define ER_EZ_TX_PIOBUF_SIZE 4096
diff --git a/drivers/net/sfc/base/medford_nic.c b/drivers/net/sfc/base/medford_nic.c
index d361d654..1365e9e3 100644
--- a/drivers/net/sfc/base/medford_nic.c
+++ b/drivers/net/sfc/base/medford_nic.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2015-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -178,7 +154,8 @@ medford_board_cfg(
if (EFX_PCI_FUNCTION_IS_PF(encp)) {
rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
- /* Disable static config checking for Medford NICs, ONLY
+ /*
+ * Disable static config checking for Medford NICs, ONLY
* for manufacturing test and setup at the factory, to
* allow the static config to be installed.
*/
diff --git a/drivers/net/sfc/base/meson.build b/drivers/net/sfc/base/meson.build
new file mode 100644
index 00000000..f1e49735
--- /dev/null
+++ b/drivers/net/sfc/base/meson.build
@@ -0,0 +1,74 @@
+# Copyright (c) 2016-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+sources = [
+ 'efx_bootcfg.c',
+ 'efx_crc32.c',
+ 'efx_ev.c',
+ 'efx_filter.c',
+ 'efx_hash.c',
+ 'efx_intr.c',
+ 'efx_lic.c',
+ 'efx_mac.c',
+ 'efx_mcdi.c',
+ 'efx_mon.c',
+ 'efx_nic.c',
+ 'efx_nvram.c',
+ 'efx_phy.c',
+ 'efx_port.c',
+ 'efx_rx.c',
+ 'efx_sram.c',
+ 'efx_tunnel.c',
+ 'efx_tx.c',
+ 'efx_vpd.c',
+ 'mcdi_mon.c',
+ 'siena_mac.c',
+ 'siena_mcdi.c',
+ 'siena_nic.c',
+ 'siena_nvram.c',
+ 'siena_phy.c',
+ 'siena_sram.c',
+ 'siena_vpd.c',
+ 'ef10_ev.c',
+ 'ef10_filter.c',
+ 'ef10_intr.c',
+ 'ef10_mac.c',
+ 'ef10_mcdi.c',
+ 'ef10_nic.c',
+ 'ef10_nvram.c',
+ 'ef10_phy.c',
+ 'ef10_rx.c',
+ 'ef10_tx.c',
+ 'ef10_vpd.c',
+ 'hunt_nic.c',
+ 'medford_nic.c'
+]
+
+extra_flags = [
+ '-Wno-sign-compare',
+ '-Wno-unused-parameter',
+ '-Wno-unused-variable',
+ '-Wno-empty-body',
+ '-Wno-unused-but-set-variable'
+]
+
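+# Enable each warning-suppression flag only if the compiler in use
+# supports it (probed via cc.has_argument below), so the base library
+# builds cleanly with compilers other than the one these flags target.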
+c_args = cflags
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+if build
+ base_lib = static_library('sfc_base', sources,
+ include_directories: includes,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+
+ base_objs = base_lib.extract_all_objects()
+else
+ base_objs = []
+endif
diff --git a/drivers/net/sfc/base/siena_flash.h b/drivers/net/sfc/base/siena_flash.h
index e2700554..91a9fe05 100644
--- a/drivers/net/sfc/base/siena_flash.h
+++ b/drivers/net/sfc/base/siena_flash.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2007-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2007-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_SIENA_FLASH_H
@@ -113,15 +89,21 @@ typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */
efx_word_t checksum; /* of whole header area + firmware image */
efx_word_t firmware_version_d;
efx_byte_t mcfw_subtype;
- efx_byte_t generation; /* Valid for medford, SBZ for earlier chips */
+ efx_byte_t generation; /* MC (Medford and later): MC partition generation when */
+ /* written to NVRAM. */
+ /* MUM & SUC images: subtype. */
+ /* (Otherwise set to 0) */
efx_dword_t firmware_text_offset; /* offset to firmware .text */
efx_dword_t firmware_text_size; /* length of firmware .text, in bytes */
efx_dword_t firmware_data_offset; /* offset to firmware .data */
efx_dword_t firmware_data_size; /* length of firmware .data, in bytes */
efx_byte_t spi_rate; /* SPI rate for reading image, 0 is BootROM default */
efx_byte_t spi_phase_adj; /* SPI SDO/SCL phase adjustment, 0 is default (no adj) */
- efx_word_t xpm_sector; /* The sector that contains the key, or 0xffff if unsigned (medford) SBZ (earlier) */
- efx_dword_t reserved_c[7]; /* (set to 0) */
+ efx_word_t xpm_sector; /* XPM (MEDFORD and later): The sector that contains */
+ /* the key, or 0xffff if unsigned. (Otherwise set to 0) */
+ efx_byte_t mumfw_subtype; /* MUM & SUC images: subtype. (Otherwise set to 0) */
+ efx_byte_t reserved_b[3]; /* (set to 0) */
+ efx_dword_t reserved_c[6]; /* (set to 0) */
} siena_mc_boot_hdr_t;
#define SIENA_MC_BOOT_HDR_PADDING \
diff --git a/drivers/net/sfc/base/siena_impl.h b/drivers/net/sfc/base/siena_impl.h
index ea6de983..d70bbff8 100644
--- a/drivers/net/sfc/base/siena_impl.h
+++ b/drivers/net/sfc/base/siena_impl.h
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#ifndef _SYS_SIENA_IMPL_H
@@ -40,8 +16,17 @@
extern "C" {
#endif
+#ifndef EFX_TXQ_DC_SIZE
+#define EFX_TXQ_DC_SIZE 1 /* 16 descriptors */
+#endif
+#ifndef EFX_RXQ_DC_SIZE
+#define EFX_RXQ_DC_SIZE 3 /* 64 descriptors */
+#endif
+#define EFX_TXQ_DC_NDESCS(_dcsize) (8 << (_dcsize))
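+
+/*
+ * Worked example: EFX_TXQ_DC_NDESCS(EFX_TXQ_DC_SIZE) = 8 << 1 = 16
+ * descriptors, matching the comment above; likewise an RX
+ * descriptor-cache size of 3 corresponds to 8 << 3 = 64 descriptors.
+ */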
+
#define SIENA_NVRAM_CHUNK 0x80
+
extern __checkReturn efx_rc_t
siena_nic_probe(
__in efx_nic_t *enp);
@@ -56,6 +41,15 @@ siena_nic_init(
#if EFSYS_OPT_DIAG
+extern efx_sram_pattern_fn_t __efx_sram_pattern_fns[];
+
+typedef struct siena_register_set_s {
+ unsigned int address;
+ unsigned int step;
+ unsigned int rows;
+ efx_oword_t mask;
+} siena_register_set_t;
+
extern __checkReturn efx_rc_t
siena_nic_register_test(
__in efx_nic_t *enp);
@@ -143,7 +137,8 @@ siena_nvram_partn_lock(
extern __checkReturn efx_rc_t
siena_nvram_partn_unlock(
__in efx_nic_t *enp,
- __in uint32_t partn);
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp);
extern __checkReturn efx_rc_t
siena_nvram_get_dynamic_cfg(
@@ -215,7 +210,8 @@ siena_nvram_partn_write(
extern __checkReturn efx_rc_t
siena_nvram_partn_rw_finish(
__in efx_nic_t *enp,
- __in uint32_t partn);
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp);
extern __checkReturn efx_rc_t
siena_nvram_partn_get_version(
diff --git a/drivers/net/sfc/base/siena_mac.c b/drivers/net/sfc/base/siena_mac.c
index 29bbff8a..904e03ed 100644
--- a/drivers/net/sfc/base/siena_mac.c
+++ b/drivers/net/sfc/base/siena_mac.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/siena_mcdi.c b/drivers/net/sfc/base/siena_mcdi.c
index 63c29fcb..ef844591 100644
--- a/drivers/net/sfc/base/siena_mcdi.c
+++ b/drivers/net/sfc/base/siena_mcdi.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2012-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2012-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/siena_nic.c b/drivers/net/sfc/base/siena_nic.c
index fcc8f151..f223c9be 100644
--- a/drivers/net/sfc/base/siena_nic.c
+++ b/drivers/net/sfc/base/siena_nic.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -174,7 +150,7 @@ siena_board_cfg(
encp->enc_required_pcie_bandwidth_mbps = 2 * 10000;
encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN2;
- encp->enc_fw_verified_nvram_update_required = B_FALSE;
+ encp->enc_nvram_update_verify_result_supported = B_FALSE;
return (0);
@@ -190,7 +166,9 @@ static __checkReturn efx_rc_t
siena_phy_cfg(
__in efx_nic_t *enp)
{
+#if EFSYS_OPT_PHY_STATS
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+#endif /* EFSYS_OPT_PHY_STATS */
efx_rc_t rc;
/* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
@@ -211,6 +189,77 @@ fail1:
return (rc);
}
+#define SIENA_BIU_MAGIC0 0x01234567
+#define SIENA_BIU_MAGIC1 0xfedcba98
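+
+/*
+ * SIENA_BIU_MAGIC1 is the bitwise complement of SIENA_BIU_MAGIC0, so
+ * the two patterns together exercise every bit of the scratch
+ * registers in both states.
+ */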
+
+static __checkReturn efx_rc_t
+siena_nic_biu_test(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ /*
+ * Write magic values to scratch registers 0 and 1, then
+ * verify that the values were written correctly. Interleave
+ * the accesses to ensure that the BIU is not just reading
+ * back the cached value that was last written.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ /*
+ * Perform the same test, with the values swapped. This
+ * ensures that subsequent tests don't start with the correct
+ * values already written into the scratch registers.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
__checkReturn efx_rc_t
siena_nic_probe(
__in efx_nic_t *enp)
@@ -225,7 +274,7 @@ siena_nic_probe(
EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
/* Test BIU */
- if ((rc = efx_nic_biu_test(enp)) != 0)
+ if ((rc = siena_nic_biu_test(enp)) != 0)
goto fail1;
/* Clear the region register */
@@ -455,7 +504,7 @@ siena_nic_unprobe(
#if EFSYS_OPT_DIAG
-static efx_register_set_t __siena_registers[] = {
+static siena_register_set_t __siena_registers[] = {
{ FR_AZ_ADR_REGION_REG_OFST, 0, 1 },
{ FR_CZ_USR_EV_CFG_OFST, 0, 1 },
{ FR_AZ_RX_CFG_REG_OFST, 0, 1 },
@@ -487,7 +536,7 @@ static const uint32_t __siena_register_masks[] = {
0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000
};
-static efx_register_set_t __siena_tables[] = {
+static siena_register_set_t __siena_tables[] = {
{ FR_AZ_RX_FILTER_TBL0_OFST, FR_AZ_RX_FILTER_TBL0_STEP,
FR_AZ_RX_FILTER_TBL0_ROWS },
{ FR_CZ_RX_MAC_FILTER_TBL0_OFST, FR_CZ_RX_MAC_FILTER_TBL0_STEP,
@@ -514,10 +563,144 @@ static const uint32_t __siena_table_masks[] = {
};
__checkReturn efx_rc_t
+siena_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in siena_register_set_t *rsp,
+ __in size_t count)
+{
+ unsigned int bit;
+ efx_oword_t original;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ while (count > 0) {
+ /* This function is only suitable for registers */
+ EFSYS_ASSERT(rsp->rows == 1);
+
+ /* bit sweep on and off */
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+ for (bit = 0; bit < 128; bit++) {
+ /* Is this bit in the mask? */
+			if (~(rsp->mask.eo_u32[bit >> 5]) & (1U << (bit & 0x1f)))
+ continue;
+
+ /* Test this bit can be set in isolation */
+ reg = original;
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFX_SET_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ /* Test this bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, rsp->mask);
+ EFX_CLEAR_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail2;
+ }
+ }
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+
+ --count;
+ ++rsp;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in siena_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count)
+{
+ efx_sram_pattern_fn_t func;
+ unsigned int index;
+ unsigned int address;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[pattern];
+
+ while (count > 0) {
+ /* Write */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_WRITEO(enp->en_esbp, address, &reg, B_TRUE);
+
+ address += rsp->step;
+ }
+
+ /* Read */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ address += rsp->step;
+ }
+
+ ++rsp;
+ --count;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
siena_nic_register_test(
__in efx_nic_t *enp)
{
- efx_register_set_t *rsp;
+ siena_register_set_t *rsp;
const uint32_t *dwordp;
unsigned int nitems;
unsigned int count;
@@ -551,21 +734,21 @@ siena_nic_register_test(
rsp->mask.eo_u32[3] = *dwordp++;
}
- if ((rc = efx_nic_test_registers(enp, __siena_registers,
+ if ((rc = siena_nic_test_registers(enp, __siena_registers,
EFX_ARRAY_SIZE(__siena_registers))) != 0)
goto fail1;
- if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ if ((rc = siena_nic_test_tables(enp, __siena_tables,
EFX_PATTERN_BYTE_ALTERNATE,
EFX_ARRAY_SIZE(__siena_tables))) != 0)
goto fail2;
- if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ if ((rc = siena_nic_test_tables(enp, __siena_tables,
EFX_PATTERN_BYTE_CHANGING,
EFX_ARRAY_SIZE(__siena_tables))) != 0)
goto fail3;
- if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ if ((rc = siena_nic_test_tables(enp, __siena_tables,
EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0)
goto fail4;
diff --git a/drivers/net/sfc/base/siena_nvram.c b/drivers/net/sfc/base/siena_nvram.c
index af4cf172..e72bba0b 100644
--- a/drivers/net/sfc/base/siena_nvram.c
+++ b/drivers/net/sfc/base/siena_nvram.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -170,7 +146,8 @@ fail1:
__checkReturn efx_rc_t
siena_nvram_partn_unlock(
__in efx_nic_t *enp,
- __in uint32_t partn)
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp)
{
boolean_t reboot;
efx_rc_t rc;
@@ -183,7 +160,7 @@ siena_nvram_partn_unlock(
partn == MC_CMD_NVRAM_TYPE_PHY_PORT1 ||
partn == MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO);
- rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, NULL);
+ rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, verify_resultp);
if (rc != 0)
goto fail1;
@@ -239,6 +216,7 @@ siena_nvram_type_to_partn(
efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
unsigned int i;
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
EFSYS_ASSERT(partnp != NULL);
@@ -587,11 +565,12 @@ fail1:
__checkReturn efx_rc_t
siena_nvram_partn_rw_finish(
__in efx_nic_t *enp,
- __in uint32_t partn)
+ __in uint32_t partn,
+ __out_opt uint32_t *verify_resultp)
{
efx_rc_t rc;
- if ((rc = siena_nvram_partn_unlock(enp, partn)) != 0)
+ if ((rc = siena_nvram_partn_unlock(enp, partn, verify_resultp)) != 0)
goto fail1;
return (0);
@@ -705,7 +684,7 @@ siena_nvram_partn_set_version(
EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
- siena_nvram_partn_unlock(enp, dcfg_partn);
+ siena_nvram_partn_unlock(enp, dcfg_partn, NULL);
return (0);
diff --git a/drivers/net/sfc/base/siena_phy.c b/drivers/net/sfc/base/siena_phy.c
index b90ccabc..d638646b 100644
--- a/drivers/net/sfc/base/siena_phy.c
+++ b/drivers/net/sfc/base/siena_phy.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -273,7 +249,9 @@ siena_phy_reconfigure(
MAX(MC_CMD_SET_LINK_IN_LEN,
MC_CMD_SET_LINK_OUT_LEN))];
uint32_t cap_mask;
+#if EFSYS_OPT_PHY_LED_CONTROL
unsigned int led_mode;
+#endif
unsigned int speed;
efx_rc_t rc;
diff --git a/drivers/net/sfc/base/siena_sram.c b/drivers/net/sfc/base/siena_sram.c
index 572c2e9a..c9ef786c 100644
--- a/drivers/net/sfc/base/siena_sram.c
+++ b/drivers/net/sfc/base/siena_sram.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
diff --git a/drivers/net/sfc/base/siena_vpd.c b/drivers/net/sfc/base/siena_vpd.c
index 4fb2e426..f188eb58 100644
--- a/drivers/net/sfc/base/siena_vpd.c
+++ b/drivers/net/sfc/base/siena_vpd.c
@@ -1,31 +1,7 @@
-/*
- * Copyright (c) 2009-2016 Solarflare Communications Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of the FreeBSD Project.
+ * Copyright (c) 2009-2018 Solarflare Communications Inc.
+ * All rights reserved.
*/
#include "efx.h"
@@ -572,7 +548,7 @@ siena_vpd_write(
EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
- siena_nvram_partn_unlock(enp, dcfg_partn);
+ siena_nvram_partn_unlock(enp, dcfg_partn, NULL);
return (0);
@@ -587,7 +563,7 @@ fail5:
fail4:
EFSYS_PROBE(fail4);
- siena_nvram_partn_unlock(enp, dcfg_partn);
+ siena_nvram_partn_unlock(enp, dcfg_partn, NULL);
fail3:
EFSYS_PROBE(fail3);
fail2:
diff --git a/drivers/net/sfc/efsys.h b/drivers/net/sfc/efsys.h
index f428b624..c7a54c3b 100644
--- a/drivers/net/sfc/efsys.h
+++ b/drivers/net/sfc/efsys.h
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_COMMON_EFSYS_H
@@ -214,6 +192,8 @@ prefetch_read_once(const volatile void *addr)
#define EFSYS_OPT_RX_PACKED_STREAM 0
+#define EFSYS_OPT_TUNNEL 1
+
/* ID */
typedef struct __efsys_identifier_s efsys_identifier_t;
diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
new file mode 100644
index 00000000..b60a8f58
--- /dev/null
+++ b/drivers/net/sfc/meson.build
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2016-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+if arch_subdir != 'x86'
+ build = false
+endif
+
+allow_experimental_apis = true
+
+extra_flags = []
+
+# Strict-aliasing rules are violated by rte_eth_link to uint64_t casts
+extra_flags += '-Wno-strict-aliasing'
+
+# Enable more warnings
+extra_flags += [
+ '-Wextra',
+ '-Wdisabled-optimization'
+]
+
+# Compiler and version dependent flags
+extra_flags += [
+ '-Waggregate-return',
+ '-Wnested-externs',
+ '-Wbad-function-cast'
+]
+
+# Suppress the ICC false positive warning that 'bulk' may be used before
+# its value is set
+extra_flags += '-wd3656'
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'sfc_ethdev.c',
+ 'sfc_kvargs.c',
+ 'sfc.c',
+ 'sfc_mcdi.c',
+ 'sfc_intr.c',
+ 'sfc_ev.c',
+ 'sfc_port.c',
+ 'sfc_rx.c',
+ 'sfc_tx.c',
+ 'sfc_tso.c',
+ 'sfc_filter.c',
+ 'sfc_flow.c',
+ 'sfc_dp.c',
+ 'sfc_ef10_rx.c',
+ 'sfc_ef10_tx.c'
+)
+
+includes += include_directories('base')
diff --git a/drivers/net/sfc/rte_pmd_sfc_efx_version.map b/drivers/net/sfc/rte_pmd_sfc_version.map
index 31eca32e..31eca32e 100644
--- a/drivers/net/sfc/rte_pmd_sfc_efx_version.map
+++ b/drivers/net/sfc/rte_pmd_sfc_version.map
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 49d7e937..ac5fdcaa 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -1,38 +1,17 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* sysconf() */
#include <unistd.h>
#include <rte_errno.h>
+#include <rte_alarm.h>
#include "efx.h"
@@ -270,27 +249,16 @@ sfc_set_drv_limits(struct sfc_adapter *sa)
return efx_nic_set_drv_limits(sa->nic, &lim);
}
-int
-sfc_start(struct sfc_adapter *sa)
+static int
+sfc_try_start(struct sfc_adapter *sa)
{
+ const efx_nic_cfg_t *encp;
int rc;
sfc_log_init(sa, "entry");
SFC_ASSERT(sfc_adapter_is_locked(sa));
-
- switch (sa->state) {
- case SFC_ADAPTER_CONFIGURED:
- break;
- case SFC_ADAPTER_STARTED:
- sfc_info(sa, "already started");
- return 0;
- default:
- rc = EINVAL;
- goto fail_bad_state;
- }
-
- sa->state = SFC_ADAPTER_STARTING;
+ SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);
sfc_log_init(sa, "set resource limits");
rc = sfc_set_drv_limits(sa);
@@ -302,6 +270,14 @@ sfc_start(struct sfc_adapter *sa)
if (rc != 0)
goto fail_nic_init;
+ encp = efx_nic_cfg_get(sa->nic);
+ if (encp->enc_tunnel_encapsulations_supported != 0) {
+ sfc_log_init(sa, "apply tunnel config");
+ rc = efx_tunnel_reconfigure(sa->nic);
+ if (rc != 0)
+ goto fail_tunnel_reconfigure;
+ }
+
rc = sfc_intr_start(sa);
if (rc != 0)
goto fail_intr_start;
@@ -326,7 +302,6 @@ sfc_start(struct sfc_adapter *sa)
if (rc != 0)
goto fail_flows_insert;
- sa->state = SFC_ADAPTER_STARTED;
sfc_log_init(sa, "done");
return 0;
@@ -346,10 +321,51 @@ fail_ev_start:
sfc_intr_stop(sa);
fail_intr_start:
+fail_tunnel_reconfigure:
efx_nic_fini(sa->nic);
fail_nic_init:
fail_set_drv_limits:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+int
+sfc_start(struct sfc_adapter *sa)
+{
+ unsigned int start_tries = 3;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ switch (sa->state) {
+ case SFC_ADAPTER_CONFIGURED:
+ break;
+ case SFC_ADAPTER_STARTED:
+ sfc_info(sa, "already started");
+ return 0;
+ default:
+ rc = EINVAL;
+ goto fail_bad_state;
+ }
+
+ sa->state = SFC_ADAPTER_STARTING;
+
+ do {
+ rc = sfc_try_start(sa);
+ } while ((--start_tries > 0) &&
+ (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));
+
+ if (rc != 0)
+ goto fail_try_start;
+
+ sa->state = SFC_ADAPTER_STARTED;
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_try_start:
sa->state = SFC_ADAPTER_CONFIGURED;
fail_bad_state:
sfc_log_init(sa, "failed %d", rc);
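The reworked sfc_start() above factors the body into sfc_try_start() and retries it a bounded number of times on errno values that can be transient right after NIC initialisation (EIO, EAGAIN, ENOENT, EINVAL). A compact sketch of that bounded-retry shape, with a hypothetical try_start() that fails transiently twice:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for sfc_try_start(): fails transiently twice. */
static int
try_start(void)
{
	static int calls;

	return (++calls < 3) ? EAGAIN : 0;
}

static bool
is_transient(int rc)
{
	/* The same errno set the driver treats as retryable */
	return rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL;
}

static int
start_with_retries(void)
{
	unsigned int tries = 3;
	int rc;

	do {
		rc = try_start();
	} while (--tries > 0 && is_transient(rc));

	return rc;
}

int
main(void)
{
	printf("start rc=%d\n", start_with_retries());	/* 0: third try wins */
	return 0;
}
```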
@@ -389,6 +405,58 @@ sfc_stop(struct sfc_adapter *sa)
sfc_log_init(sa, "done");
}
+static int
+sfc_restart(struct sfc_adapter *sa)
+{
+ int rc;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ if (sa->state != SFC_ADAPTER_STARTED)
+ return EINVAL;
+
+ sfc_stop(sa);
+
+ rc = sfc_start(sa);
+ if (rc != 0)
+ sfc_err(sa, "restart failed");
+
+ return rc;
+}
+
+static void
+sfc_restart_if_required(void *arg)
+{
+ struct sfc_adapter *sa = arg;
+
+ /* If restart is scheduled, clear the flag and do it */
+ if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
+ 1, 0)) {
+ sfc_adapter_lock(sa);
+ if (sa->state == SFC_ADAPTER_STARTED)
+ (void)sfc_restart(sa);
+ sfc_adapter_unlock(sa);
+ }
+}
+
+void
+sfc_schedule_restart(struct sfc_adapter *sa)
+{
+ int rc;
+
+ /* Schedule restart alarm if it is not scheduled yet */
+ if (!rte_atomic32_test_and_set(&sa->restart_required))
+ return;
+
+ rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
+ if (rc == -ENOTSUP)
+ sfc_warn(sa, "alarms are not supported, restart is pending");
+ else if (rc != 0)
+ sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
+ else
+ sfc_info(sa, "restart scheduled");
+}
+
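sfc_schedule_restart() and sfc_restart_if_required() above form a schedule-once pattern: rte_atomic32_test_and_set() arms the restart_required flag so that only the first caller arms the EAL alarm, and rte_atomic32_cmpset() in the callback consumes the flag exactly once. A minimal sketch of the same pattern using C11 atomics in place of the rte_atomic32 helpers (the names are illustrative, and the alarm arming is reduced to a message):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint restart_required;

/* Stand-in for the deferred work done by sfc_restart_if_required(). */
static void
restart_cb(void)
{
	unsigned int expected = 1;

	/* Consume the flag; only one caller wins if it was set. */
	if (atomic_compare_exchange_strong(&restart_required, &expected, 0))
		puts("restarting");
}

/* Stand-in for sfc_schedule_restart(): arm the flag exactly once. */
static void
schedule_restart(void)
{
	/* test-and-set: only the first caller actually schedules */
	if (atomic_exchange(&restart_required, 1) != 0)
		return;	/* already pending */

	/* The driver arms a 1us EAL alarm here via rte_eal_alarm_set() */
	puts("restart scheduled");
}

int
main(void)
{
	schedule_restart();	/* arms the flag, would schedule the alarm */
	schedule_restart();	/* no-op: restart already pending */
	restart_cb();		/* the deferred callback runs once */
	restart_cb();		/* flag already consumed: does nothing */
	return 0;
}
```

Note the matching teardown later in this patch: sfc_unprobe() calls rte_eal_alarm_cancel() before the adapter is freed, since the alarm callback receives the adapter as its opaque argument.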
int
sfc_configure(struct sfc_adapter *sa)
{
@@ -583,6 +651,16 @@ sfc_attach(struct sfc_adapter *sa)
if (rc != 0)
goto fail_nic_reset;
+ /*
+	 * A probed NIC is sufficient for tunnel init.
+	 * Initialize tunnel support here so that the libefx
+	 * efx_tunnel_config_udp_{add,remove}() calls may be used in any
+	 * state and efx_tunnel_reconfigure() may be used on start-up.
+ */
+ rc = efx_tunnel_init(enp);
+ if (rc != 0)
+ goto fail_tunnel_init;
+
encp = efx_nic_cfg_get(sa->nic);
if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
@@ -644,6 +722,9 @@ fail_intr_attach:
efx_nic_fini(sa->nic);
fail_estimate_rsrc_limits:
+fail_tunnel_init:
+ efx_tunnel_fini(sa->nic);
+
fail_nic_reset:
sfc_log_init(sa, "failed %d", rc);
@@ -663,6 +744,7 @@ sfc_detach(struct sfc_adapter *sa)
sfc_port_detach(sa);
sfc_ev_detach(sa);
sfc_intr_detach(sa);
+ efx_tunnel_fini(sa->nic);
sa->state = SFC_ADAPTER_UNINITIALIZED;
}
@@ -679,6 +761,7 @@ sfc_probe(struct sfc_adapter *sa)
SFC_ASSERT(sfc_adapter_is_locked(sa));
sa->socket_id = rte_socket_id();
+ rte_atomic32_init(&sa->restart_required);
sfc_log_init(sa, "init mem bar");
rc = sfc_mem_bar_init(sa);
@@ -743,6 +826,14 @@ sfc_unprobe(struct sfc_adapter *sa)
sfc_mcdi_fini(sa);
+ /*
+	 * Make sure that there is no pending restart alarm, since we are
+	 * about to free the device private data which is passed to the
+	 * callback as its opaque argument. A new alarm cannot be
+	 * scheduled at this point since MCDI is already shut down.
+ */
+ rte_eal_alarm_cancel(sfc_restart_if_required, sa);
+
sfc_log_init(sa, "destroy nic");
sa->nic = NULL;
efx_nic_destroy(enp);
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index 7f11bf22..75575349 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_H
@@ -36,9 +14,10 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
+#include <rte_atomic.h>
#include "efx.h"
@@ -158,6 +137,8 @@ struct sfc_port {
boolean_t promisc;
boolean_t allmulti;
+ struct ether_addr default_mac_addr;
+
unsigned int max_mcast_addrs;
unsigned int nb_mcast_addrs;
uint8_t *mcast_addrs;
@@ -195,6 +176,7 @@ struct sfc_adapter {
efx_family_t family;
efx_nic_t *nic;
rte_spinlock_t nic_lock;
+ rte_atomic32_t restart_required;
struct sfc_mcdi mcdi;
struct sfc_intr intr;
@@ -210,7 +192,29 @@ struct sfc_adapter {
unsigned int evq_count;
unsigned int mgmt_evq_index;
+ /*
+	 * The lock serialises management event queue polling, which may
+	 * be done from different contexts, and guarantees that
+	 * mgmt_evq_running does not change while the lock is held, thus
+	 * serialising polling against start/stop operations.
+ *
+ * Locks which may be held when the lock is acquired:
+ * - adapter lock, when:
+ * - device start/stop to change mgmt_evq_running
+ * - any control operations in client side MCDI proxy handling to
+ * poll management event queue waiting for proxy response
+ * - MCDI lock, when:
+ * - any control operations in client side MCDI proxy handling to
+ * poll management event queue waiting for proxy response
+ *
+ * Locks which are acquired with the lock held:
+ * - nic_lock, when:
+ * - MC event processing on management event queue polling
+ * (e.g. MC REBOOT or BADASSERT events)
+ */
rte_spinlock_t mgmt_evq_lock;
+ bool mgmt_evq_running;
struct sfc_evq *mgmt_evq;
unsigned int rxq_count;
@@ -305,6 +309,8 @@ void sfc_detach(struct sfc_adapter *sa);
int sfc_start(struct sfc_adapter *sa);
void sfc_stop(struct sfc_adapter *sa);
+void sfc_schedule_restart(struct sfc_adapter *sa);
+
int sfc_mcdi_init(struct sfc_adapter *sa);
void sfc_mcdi_fini(struct sfc_adapter *sa);
diff --git a/drivers/net/sfc/sfc_debug.h b/drivers/net/sfc/sfc_debug.h
index 92eba9c3..6b600ff4 100644
--- a/drivers/net/sfc/sfc_debug.h
+++ b/drivers/net/sfc/sfc_debug.h
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_DEBUG_H_
@@ -35,7 +13,7 @@
#include <rte_debug.h>
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
-/* Avoid dependency from RTE_LOG_LEVEL to be able to enable debug check
+/* Avoid a dependency on RTE_LOG_DP_LEVEL to be able to enable debug checks
* in the driver only.
*/
#define SFC_ASSERT(exp) RTE_VERIFY(exp)
diff --git a/drivers/net/sfc/sfc_dp.c b/drivers/net/sfc/sfc_dp.c
index 860aa921..9a5ca20b 100644
--- a/drivers/net/sfc/sfc_dp.c
+++ b/drivers/net/sfc/sfc_dp.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2017 Solarflare Communications Inc.
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/queue.h>
diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h
index eff0aa87..b142532d 100644
--- a/drivers/net/sfc/sfc_dp.h
+++ b/drivers/net/sfc/sfc_dp.h
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2017 Solarflare Communications Inc.
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_DP_H
diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
index 3f6a604b..be725dcb 100644
--- a/drivers/net/sfc/sfc_dp_rx.h
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -1,39 +1,17 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2017 Solarflare Communications Inc.
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_DP_RX_H
#define _SFC_DP_RX_H
#include <rte_mempool.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "sfc_dp.h"
@@ -60,6 +38,8 @@ struct sfc_dp_rxq {
struct sfc_dp_rx_qcreate_info {
/** Memory pool to allocate Rx buffer from */
struct rte_mempool *refill_mb_pool;
+ /** Maximum number of pushed Rx descriptors in the queue */
+ unsigned int max_fill_level;
/** Minimum number of unused Rx descriptors to do refill */
unsigned int refill_threshold;
/**
@@ -101,6 +81,29 @@ struct sfc_dp_rx_qcreate_info {
};
/**
+ * Get Rx datapath specific device info.
+ *
+ * @param dev_info Device info to be adjusted
+ */
+typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
+
+/**
+ * Get size of receive and event queue rings by the number of Rx
+ * descriptors.
+ *
+ * @param nb_rx_desc Number of Rx descriptors
+ * @param rxq_entries Location for number of Rx ring entries
+ * @param evq_entries Location for number of event ring entries
+ * @param rxq_max_fill_level Location for maximum Rx ring fill level
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc,
+ unsigned int *rxq_entries,
+ unsigned int *evq_entries,
+ unsigned int *rxq_max_fill_level);
+
+/**
* Allocate and initialize datapath receive queue.
*
* @param port_id The port identifier
@@ -150,7 +153,8 @@ typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
/** Get packet types recognized/classified */
-typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(void);
+typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(
+ uint32_t tunnel_encaps);
/** Get number of pending Rx descriptors */
typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
@@ -166,6 +170,9 @@ struct sfc_dp_rx {
unsigned int features;
#define SFC_DP_RX_FEAT_SCATTER 0x1
#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x2
+#define SFC_DP_RX_FEAT_TUNNELS 0x4
+ sfc_dp_rx_get_dev_info_t *get_dev_info;
+ sfc_dp_rx_qsize_up_rings_t *qsize_up_rings;
sfc_dp_rx_qcreate_t *qcreate;
sfc_dp_rx_qdestroy_t *qdestroy;
sfc_dp_rx_qstart_t *qstart;
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index 94d1b108..0c1aad90 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -1,38 +1,16 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_DP_TX_H
#define _SFC_DP_TX_H
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "sfc_dp.h"
@@ -57,10 +35,14 @@ struct sfc_dp_txq {
* readable.
*/
struct sfc_dp_tx_qcreate_info {
+ /** Maximum number of pushed Tx descriptors */
+ unsigned int max_fill_level;
/** Minimum number of unused Tx descriptors to do reap */
unsigned int free_thresh;
/** Transmit queue configuration flags */
unsigned int flags;
+ /** Offloads enabled on the transmit queue */
+ uint64_t offloads;
/** Tx queue size */
unsigned int txq_entries;
/** Maximum size of data in the DMA descriptor */
@@ -78,6 +60,29 @@ struct sfc_dp_tx_qcreate_info {
};
/**
+ * Get Tx datapath specific device info.
+ *
+ * @param dev_info Device info to be adjusted
+ */
+typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
+
+/**
+ * Get size of transmit and event queue rings by the number of Tx
+ * descriptors.
+ *
+ * @param nb_tx_desc Number of Tx descriptors
+ * @param txq_entries Location for number of Tx ring entries
+ * @param evq_entries Location for number of event ring entries
+ * @param txq_max_fill_level Location for maximum Tx ring fill level
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_tx_qsize_up_rings_t)(uint16_t nb_tx_desc,
+ unsigned int *txq_entries,
+ unsigned int *evq_entries,
+ unsigned int *txq_max_fill_level);
+
+/**
* Allocate and initialize datapath transmit queue.
*
* @param port_id The port identifier
@@ -144,6 +149,8 @@ struct sfc_dp_tx {
#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x8
#define SFC_DP_TX_FEAT_MULTI_POOL 0x10
#define SFC_DP_TX_FEAT_REFCNT 0x20
+ sfc_dp_tx_get_dev_info_t *get_dev_info;
+ sfc_dp_tx_qsize_up_rings_t *qsize_up_rings;
sfc_dp_tx_qcreate_t *qcreate;
sfc_dp_tx_qdestroy_t *qdestroy;
sfc_dp_tx_qstart_t *qstart;
diff --git a/drivers/net/sfc/sfc_ef10.h b/drivers/net/sfc/sfc_ef10.h
index 060d8fef..ace6a1dd 100644
--- a/drivers/net/sfc/sfc_ef10.h
+++ b/drivers/net/sfc/sfc_ef10.h
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2017 Solarflare Communications Inc.
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_EF10_H
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 18d60c69..0b3e8fbb 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* EF10 native datapath implementation */
@@ -93,6 +71,7 @@ struct sfc_ef10_rxq {
/* Used on refill */
uint16_t buf_size;
unsigned int added;
+ unsigned int max_fill_level;
unsigned int refill_threshold;
struct rte_mempool *refill_mb_pool;
efx_qword_t *rxq_hw_ring;
@@ -141,8 +120,7 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
void *objs[SFC_RX_REFILL_BULK];
unsigned int added = rxq->added;
- free_space = SFC_EF10_RXQ_LIMIT(ptr_mask + 1) -
- (added - rxq->completed);
+ free_space = rxq->max_fill_level - (added - rxq->completed);
if (free_space < rxq->refill_threshold)
return;
@@ -251,6 +229,11 @@ static void
sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
struct rte_mbuf *m)
{
+ uint32_t tun_ptype = 0;
+ /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */
+ int8_t ip_csum_err_bit;
+ /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */
+ int8_t l4_csum_err_bit;
uint32_t l2_ptype = 0;
uint32_t l3_ptype = 0;
uint32_t l4_ptype = 0;
@@ -259,15 +242,51 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))
goto done;
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) {
+ default:
+ /* Unexpected encapsulation tag class */
+ SFC_ASSERT(false);
+ /* FALLTHROUGH */
+ case ESE_EZ_ENCAP_HDR_NONE:
+ break;
+ case ESE_EZ_ENCAP_HDR_VXLAN:
+ /*
+ * It is definitely UDP, but we have no information
+ * about IPv4 vs IPv6 and VLAN tagging.
+ */
+ tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
+ break;
+ case ESE_EZ_ENCAP_HDR_GRE:
+ /*
+ * We have no information about IPv4 vs IPv6 and VLAN tagging.
+ */
+ tun_ptype = RTE_PTYPE_TUNNEL_NVGRE;
+ break;
+ }
+
+ if (tun_ptype == 0) {
+ ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN;
+ l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN;
+ } else {
+ ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN;
+ l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN;
+ if (unlikely(EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_IPCKSUM_ERR_LBN)))
+ ol_flags |= PKT_RX_EIP_CKSUM_BAD;
+ }
+
switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
case ESE_DZ_ETH_TAG_CLASS_NONE:
- l2_ptype = RTE_PTYPE_L2_ETHER;
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER :
+ RTE_PTYPE_INNER_L2_ETHER;
break;
case ESE_DZ_ETH_TAG_CLASS_VLAN1:
- l2_ptype = RTE_PTYPE_L2_ETHER_VLAN;
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN :
+ RTE_PTYPE_INNER_L2_ETHER_VLAN;
break;
case ESE_DZ_ETH_TAG_CLASS_VLAN2:
- l2_ptype = RTE_PTYPE_L2_ETHER_QINQ;
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ :
+ RTE_PTYPE_INNER_L2_ETHER_QINQ;
break;
default:
/* Unexpected Eth tag class */
@@ -276,25 +295,30 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
case ESE_DZ_L3_CLASS_IP4_FRAG:
- l4_ptype = RTE_PTYPE_L4_FRAG;
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+ RTE_PTYPE_INNER_L4_FRAG;
/* FALLTHROUGH */
case ESE_DZ_L3_CLASS_IP4:
- l3_ptype = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN :
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
ol_flags |= PKT_RX_RSS_HASH |
- ((EFX_TEST_QWORD_BIT(rx_ev,
- ESF_DZ_RX_IPCKSUM_ERR_LBN)) ?
+ ((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ?
PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
break;
case ESE_DZ_L3_CLASS_IP6_FRAG:
- l4_ptype |= RTE_PTYPE_L4_FRAG;
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+ RTE_PTYPE_INNER_L4_FRAG;
/* FALLTHROUGH */
case ESE_DZ_L3_CLASS_IP6:
- l3_ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
ol_flags |= PKT_RX_RSS_HASH;
break;
case ESE_DZ_L3_CLASS_ARP:
/* Override Layer 2 packet type */
- l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
+ /* There is no ARP classification for inner packets */
+ if (tun_ptype == 0)
+ l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
break;
default:
/* Unexpected Layer 3 class */
@@ -303,17 +327,17 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L4_CLASS)) {
case ESE_DZ_L4_CLASS_TCP:
- l4_ptype = RTE_PTYPE_L4_TCP;
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP :
+ RTE_PTYPE_INNER_L4_TCP;
ol_flags |=
- (EFX_TEST_QWORD_BIT(rx_ev,
- ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
+ (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
break;
case ESE_DZ_L4_CLASS_UDP:
- l4_ptype = RTE_PTYPE_L4_UDP;
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP :
+ RTE_PTYPE_INNER_L4_UDP;
ol_flags |=
- (EFX_TEST_QWORD_BIT(rx_ev,
- ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
+ (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
break;
case ESE_DZ_L4_CLASS_UNKNOWN:
@@ -329,7 +353,7 @@ sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
done:
m->ol_flags = ol_flags;
- m->packet_type = l2_ptype | l3_ptype | l4_ptype;
+ m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype;
}
static uint16_t
@@ -515,7 +539,7 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
static const uint32_t *
-sfc_ef10_supported_ptypes_get(void)
+sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
{
static const uint32_t ef10_native_ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -529,8 +553,47 @@ sfc_ef10_supported_ptypes_get(void)
RTE_PTYPE_L4_UDP,
RTE_PTYPE_UNKNOWN
};
+ static const uint32_t ef10_overlay_ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_QINQ,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_TUNNEL_NVGRE,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L2_ETHER_QINQ,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
- return ef10_native_ptypes;
+ /*
+	 * The function returns a static set of supported packet types,
+	 * so it cannot be built dynamically based on the supported tunnel
+	 * encapsulations and must be limited to the known sets.
+ */
+ switch (tunnel_encaps) {
+ case (1u << EFX_TUNNEL_PROTOCOL_VXLAN |
+ 1u << EFX_TUNNEL_PROTOCOL_GENEVE |
+ 1u << EFX_TUNNEL_PROTOCOL_NVGRE):
+ return ef10_overlay_ptypes;
+ default:
+ RTE_LOG(ERR, PMD,
+ "Unexpected set of supported tunnel encapsulations: %#x\n",
+ tunnel_encaps);
+ /* FALLTHROUGH */
+ case 0:
+ return ef10_native_ptypes;
+ }
}
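With tunnel classification, sfc_ef10_rx_ev_to_offloads() above composes the mbuf packet type as the OR of the tunnel, L2, L3 and L4 bits, switching to the RTE_PTYPE_INNER_* variants whenever a tunnel class was recognised. A reduced sketch of that composition, with hypothetical ptype bits standing in for the RTE_PTYPE_* masks:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical ptype bits standing in for the RTE_PTYPE_* masks. */
#define PT_L2_ETHER		0x01u
#define PT_INNER_L2_ETHER	0x02u
#define PT_L4_UDP		0x10u
#define PT_INNER_L4_UDP		0x20u
#define PT_TUNNEL_VXLAN		0x100u

/*
 * Mirrors the selection above: once a tunnel class is recognised, all
 * further L2/L4 classification applies to the inner frame, so the
 * inner ptype variants are chosen and OR-ed with the tunnel bit.
 */
static uint32_t
compose_ptype(int tunneled)
{
	uint32_t tun = tunneled ? PT_TUNNEL_VXLAN : 0;
	uint32_t l2 = (tun == 0) ? PT_L2_ETHER : PT_INNER_L2_ETHER;
	uint32_t l4 = (tun == 0) ? PT_L4_UDP : PT_INNER_L4_UDP;

	return tun | l2 | l4;
}

int
main(void)
{
	printf("plain: %#x, vxlan: %#x\n", compose_ptype(0), compose_ptype(1));
	return 0;
}
```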
static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
@@ -553,6 +616,43 @@ sfc_ef10_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
}
+static sfc_dp_rx_get_dev_info_t sfc_ef10_rx_get_dev_info;
+static void
+sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
+{
+ /*
+	 * The number of descriptors only defines the maximum number of
+	 * pushed descriptors (the fill level).
+ */
+ dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
+ dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
+}
+
+
+static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
+static int
+sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ unsigned int *rxq_entries,
+ unsigned int *evq_entries,
+ unsigned int *rxq_max_fill_level)
+{
+ /*
+ * rte_ethdev API guarantees that the number meets min, max and
+ * alignment requirements.
+ */
+ if (nb_rx_desc <= EFX_RXQ_MINNDESCS)
+ *rxq_entries = EFX_RXQ_MINNDESCS;
+ else
+ *rxq_entries = rte_align32pow2(nb_rx_desc);
+
+ *evq_entries = *rxq_entries;
+
+ *rxq_max_fill_level = RTE_MIN(nb_rx_desc,
+ SFC_EF10_RXQ_LIMIT(*evq_entries));
+ return 0;
+}
+
+
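sfc_ef10_rx_qsize_up_rings() above decouples the hardware ring size from the usable fill level: the ring is rounded up to a power of two (the EF10 hardware requirement), while the fill level is capped at what the caller requested and at what the ring can actually hold. A self-contained sketch with a portable stand-in for rte_align32pow2() and hypothetical constants in place of EFX_RXQ_MINNDESCS and SFC_EF10_RXQ_LIMIT():

```c
#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for rte_align32pow2(): round up to a power of two. */
static uint32_t
align32pow2(uint32_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return x + 1;
}

/* Hypothetical stand-ins for the EF10 constants used above. */
#define RXQ_MINNDESCS	512u
#define RXQ_LIMIT(n)	((n) - 16u)	/* ring can never be fully filled */

static void
qsize_up_rings(uint16_t nb_rx_desc, unsigned int *rxq_entries,
	       unsigned int *evq_entries, unsigned int *max_fill_level)
{
	/* Hardware rings must have a power-of-two number of entries */
	*rxq_entries = (nb_rx_desc <= RXQ_MINNDESCS) ?
	    RXQ_MINNDESCS : align32pow2(nb_rx_desc);
	*evq_entries = *rxq_entries;

	/* Never push more than the user asked for or the ring allows */
	*max_fill_level = nb_rx_desc < RXQ_LIMIT(*evq_entries) ?
	    nb_rx_desc : RXQ_LIMIT(*evq_entries);
}

int
main(void)
{
	unsigned int rxq, evq, fill;

	qsize_up_rings(700, &rxq, &evq, &fill);
	printf("rxq=%u evq=%u fill=%u\n", rxq, evq, fill); /* 1024 1024 700 */
	return 0;
}
```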
static uint64_t
sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
{
@@ -606,6 +706,7 @@ sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
rxq->flags |= SFC_EF10_RXQ_RSS_HASH;
rxq->ptr_mask = info->rxq_entries - 1;
rxq->evq_hw_ring = info->evq_hw_ring;
+ rxq->max_fill_level = info->max_fill_level;
rxq->refill_threshold = info->refill_threshold;
rxq->rearm_data =
sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size);
@@ -707,7 +808,10 @@ struct sfc_dp_rx sfc_ef10_rx = {
.type = SFC_DP_RX,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
- .features = SFC_DP_RX_FEAT_MULTI_PROCESS,
+ .features = SFC_DP_RX_FEAT_MULTI_PROCESS |
+ SFC_DP_RX_FEAT_TUNNELS,
+ .get_dev_info = sfc_ef10_rx_get_dev_info,
+ .qsize_up_rings = sfc_ef10_rx_qsize_up_rings,
.qcreate = sfc_ef10_rx_qcreate,
.qdestroy = sfc_ef10_rx_qdestroy,
.qstart = sfc_ef10_rx_qstart,
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index 0454e79a..12387972 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdbool.h>
@@ -77,6 +55,7 @@ struct sfc_ef10_txq {
unsigned int ptr_mask;
unsigned int added;
unsigned int completed;
+ unsigned int max_fill_level;
unsigned int free_thresh;
unsigned int evq_read_ptr;
struct sfc_ef10_tx_sw_desc *sw_ring;
@@ -288,7 +267,6 @@ static uint16_t
sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
- unsigned int ptr_mask;
unsigned int added;
unsigned int dma_desc_space;
bool reap_done;
@@ -299,16 +277,13 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
return 0;
- ptr_mask = txq->ptr_mask;
added = txq->added;
- dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
- (added - txq->completed);
+ dma_desc_space = txq->max_fill_level - (added - txq->completed);
reap_done = (dma_desc_space < txq->free_thresh);
if (reap_done) {
sfc_ef10_tx_reap(txq);
- dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
- (added - txq->completed);
+ dma_desc_space = txq->max_fill_level - (added - txq->completed);
}
for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
@@ -333,7 +308,7 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
sfc_ef10_tx_reap(txq);
reap_done = true;
- dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ dma_desc_space = txq->max_fill_level -
(added - txq->completed);
if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
break;
@@ -343,7 +318,7 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
do {
rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
- unsigned int id = added & ptr_mask;
+ unsigned int id = added & txq->ptr_mask;
SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
@@ -446,14 +421,12 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
ptr_mask = txq->ptr_mask;
added = txq->added;
- dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
- (added - txq->completed);
+ dma_desc_space = txq->max_fill_level - (added - txq->completed);
reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
if (reap_done) {
sfc_ef10_simple_tx_reap(txq);
- dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
- (added - txq->completed);
+ dma_desc_space = txq->max_fill_level - (added - txq->completed);
}
pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
@@ -486,6 +459,40 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return pktp - &tx_pkts[0];
}
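Throughout the Tx changes above, free descriptor space is now computed as max_fill_level - (added - completed): 'added' and 'completed' are free-running unsigned counters, so the subtraction stays correct even after they wrap. A small sketch demonstrating the wraparound-safe accounting:

```c
#include <stdio.h>

/*
 * Free-running ring counters: 'added' and 'completed' only ever grow
 * and are allowed to wrap; unsigned subtraction still yields the
 * number of in-flight descriptors.
 */
struct ring {
	unsigned int added;
	unsigned int completed;
	unsigned int max_fill_level;
};

static unsigned int
ring_free_space(const struct ring *r)
{
	return r->max_fill_level - (r->added - r->completed);
}

int
main(void)
{
	/* Counters just past UINT_MAX: wraparound-safe by construction */
	struct ring r = {
		.added = 4294967290u + 10u,	/* wrapped past UINT_MAX */
		.completed = 4294967290u,
		.max_fill_level = 496,
	};

	printf("in flight: %u, free: %u\n",
	       r.added - r.completed, ring_free_space(&r));	/* 10, 486 */
	return 0;
}
```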
+static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
+static void
+sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
+{
+ /*
+	 * The number of descriptors only defines the maximum number of
+	 * pushed descriptors (the fill level).
+ */
+ dev_info->tx_desc_lim.nb_min = 1;
+ dev_info->tx_desc_lim.nb_align = 1;
+}
+
+static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
+static int
+sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
+ unsigned int *txq_entries,
+ unsigned int *evq_entries,
+ unsigned int *txq_max_fill_level)
+{
+ /*
+ * rte_ethdev API guarantees that the number meets min, max and
+ * alignment requirements.
+ */
+ if (nb_tx_desc <= EFX_TXQ_MINNDESCS)
+ *txq_entries = EFX_TXQ_MINNDESCS;
+ else
+ *txq_entries = rte_align32pow2(nb_tx_desc);
+
+ *evq_entries = *txq_entries;
+
+ *txq_max_fill_level = RTE_MIN(nb_tx_desc,
+ SFC_EF10_TXQ_LIMIT(*evq_entries));
+ return 0;
+}
static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
static int
@@ -519,6 +526,7 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
txq->ptr_mask = info->txq_entries - 1;
+ txq->max_fill_level = info->max_fill_level;
txq->free_thresh = info->free_thresh;
txq->txq_hw_ring = info->txq_hw_ring;
txq->doorbell = (volatile uint8_t *)info->mem_bar +
@@ -628,6 +636,8 @@ struct sfc_dp_tx sfc_ef10_tx = {
SFC_DP_TX_FEAT_MULTI_POOL |
SFC_DP_TX_FEAT_REFCNT |
SFC_DP_TX_FEAT_MULTI_PROCESS,
+ .get_dev_info = sfc_ef10_get_dev_info,
+ .qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,
.qstart = sfc_ef10_tx_qstart,
@@ -644,6 +654,8 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
.type = SFC_DP_TX,
},
.features = SFC_DP_TX_FEAT_MULTI_PROCESS,
+ .get_dev_info = sfc_ef10_get_dev_info,
+ .qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
.qcreate = sfc_ef10_tx_qcreate,
.qdestroy = sfc_ef10_tx_qdestroy,
.qstart = sfc_ef10_tx_qstart,
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 2f5f86f8..89a45290 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -1,36 +1,14 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -105,6 +83,7 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct sfc_adapter *sa = dev->data->dev_private;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t txq_offloads_def = 0;
sfc_log_init(sa, "entry");
@@ -126,22 +105,35 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
/* By default packets are dropped if no descriptors are available */
dev_info->default_rxconf.rx_drop_en = 1;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);
+
+ /*
+ * rx_offload_capa includes both device and queue offloads since
+	 * the latter may be requested on a per-device basis, which makes
+	 * sense when some offloads need to be set on all queues.
+ */
+ dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
+ dev_info->rx_queue_offload_capa;
+
+ dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);
+
+ /*
+ * tx_offload_capa includes both device and queue offloads since
+	 * the latter may be requested on a per-device basis, which makes
+	 * sense when some offloads need to be set on all queues.
+ */
+ dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
+ dev_info->tx_queue_offload_capa;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ dev_info->default_txconf.offloads |= txq_offloads_def;
dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) ||
!encp->enc_hw_tx_insert_vlan_enabled)
dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
- else
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
@@ -160,9 +152,7 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
#endif
- if (sa->tso)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
-
+ /* Initialize to hardware limits */
dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
/* The RXQ hardware requires that the descriptor count is a power
@@ -170,6 +160,7 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
*/
dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
+ /* Initialize to hardware limits */
dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
/*
@@ -177,14 +168,21 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
* of 2, but tx_desc_lim cannot properly describe that constraint
*/
dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
+
+ if (sa->dp_rx->get_dev_info != NULL)
+ sa->dp_rx->get_dev_info(dev_info);
+ if (sa->dp_tx->get_dev_info != NULL)
+ sa->dp_tx->get_dev_info(dev_info);
}
static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint32_t tunnel_encaps = encp->enc_tunnel_encapsulations_supported;
- return sa->dp_rx->supported_ptypes_get();
+ return sa->dp_rx->supported_ptypes_get(tunnel_encaps);
}
static int
@@ -895,7 +893,13 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
* The driver does not use it, but other PMDs update jumbo_frame
* flag and max_rx_pkt_len when MTU is set.
*/
- dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
+ if (mtu > ETHER_MAX_LEN) {
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+ rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ rxmode->jumbo_frame = 1;
+ }
+
dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
sfc_adapter_unlock(sa);
@@ -926,6 +930,12 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
sfc_adapter_lock(sa);
+ /*
+ * Copy the address to the device private data so that
+	 * it can be recalled in case of adapter restart.
+ */
+ ether_addr_copy(mac_addr, &port->default_mac_addr);
+
if (port->isolated) {
sfc_err(sa, "isolated mode is active on the port");
sfc_err(sa, "will not set MAC address");
@@ -961,9 +971,9 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
/*
* Since setting MAC address with filters installed is not
- * allowed on the adapter, one needs to simply restart adapter
- * so that the new MAC address will be taken from an outer
- * storage and set flawlessly by means of sfc_start() call
+ * allowed on the adapter, the new MAC address will be set
+ * by means of adapter restart. sfc_start() shall retrieve
+ * the new address from the device private data and set it.
*/
sfc_stop(sa);
rc = sfc_start(sa);
@@ -972,6 +982,13 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
}
unlock:
+ /*
+	 * In case of failure sa->port->default_mac_addr does not need
+	 * a rollback: no error code is returned, so the upper-layer API
+	 * will update the external MAC address storage anyway. It is
+	 * better to keep the device private value consistent with that
+	 * new value.
+ */
sfc_adapter_unlock(sa);
}
@@ -1045,7 +1062,13 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
qinfo->conf.rx_free_thresh = rxq->refill_threshold;
qinfo->conf.rx_drop_en = 1;
qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
- qinfo->scattered_rx = (rxq_info->type == EFX_RXQ_TYPE_SCATTER);
+ qinfo->conf.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
+ qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+ qinfo->scattered_rx = 1;
+ }
qinfo->nb_desc = rxq_info->entries;
sfc_adapter_unlock(sa);
@@ -1072,6 +1095,7 @@ sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
memset(qinfo, 0, sizeof(*qinfo));
qinfo->conf.txq_flags = txq_info->txq->flags;
+ qinfo->conf.offloads = txq_info->txq->offloads;
qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
qinfo->conf.tx_deferred_start = txq_info->deferred_start;
qinfo->nb_desc = txq_info->entries;
@@ -1211,6 +1235,123 @@ sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return 0;
}
+static efx_tunnel_protocol_t
+sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
+{
+ switch (rte_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ return EFX_TUNNEL_PROTOCOL_VXLAN;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ return EFX_TUNNEL_PROTOCOL_GENEVE;
+ default:
+ return EFX_TUNNEL_NPROTOS;
+ }
+}
+
+enum sfc_udp_tunnel_op_e {
+ SFC_UDP_TUNNEL_ADD_PORT,
+ SFC_UDP_TUNNEL_DEL_PORT,
+};
+
+static int
+sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp,
+ enum sfc_udp_tunnel_op_e op)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ efx_tunnel_protocol_t tunnel_proto;
+ int rc;
+
+ sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
+ (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
+ (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
+ tunnel_udp->udp_port, tunnel_udp->prot_type);
+
+ tunnel_proto =
+ sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
+ if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
+ rc = ENOTSUP;
+ goto fail_bad_proto;
+ }
+
+ sfc_adapter_lock(sa);
+
+ switch (op) {
+ case SFC_UDP_TUNNEL_ADD_PORT:
+ rc = efx_tunnel_config_udp_add(sa->nic,
+ tunnel_udp->udp_port,
+ tunnel_proto);
+ break;
+ case SFC_UDP_TUNNEL_DEL_PORT:
+ rc = efx_tunnel_config_udp_remove(sa->nic,
+ tunnel_udp->udp_port,
+ tunnel_proto);
+ break;
+ default:
+ rc = EINVAL;
+ goto fail_bad_op;
+ }
+
+ if (rc != 0)
+ goto fail_op;
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_tunnel_reconfigure(sa->nic);
+ if (rc == EAGAIN) {
+ /*
+			 * The configuration is accepted by the FW, and an MC
+			 * reboot is initiated to apply the changes. The MC
+			 * reboot is handled in the usual way (MC reboot event
+			 * on the management event queue followed by adapter
+			 * restart).
+ */
+ rc = 0;
+ } else if (rc != 0) {
+ goto fail_reconfigure;
+ }
+ }
+
+ sfc_adapter_unlock(sa);
+ return 0;
+
+fail_reconfigure:
+	/* Remove/restore the entry since the change caused the failure */
+ switch (op) {
+ case SFC_UDP_TUNNEL_ADD_PORT:
+ (void)efx_tunnel_config_udp_remove(sa->nic,
+ tunnel_udp->udp_port,
+ tunnel_proto);
+ break;
+ case SFC_UDP_TUNNEL_DEL_PORT:
+ (void)efx_tunnel_config_udp_add(sa->nic,
+ tunnel_udp->udp_port,
+ tunnel_proto);
+ break;
+ }
+
+fail_op:
+fail_bad_op:
+ sfc_adapter_unlock(sa);
+
+fail_bad_proto:
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
+}
+
+static int
+sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
+}
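
From the application side these handlers are reached through the generic ethdev entry points. A minimal usage sketch; port id 0 and the IANA VXLAN port are arbitrary example values:

#include <rte_ethdev.h>

/* Register the standard VXLAN UDP port on ethdev port 0; on an sfc
 * device this dispatches to sfc_dev_udp_tunnel_port_add() above. */
static int
example_add_vxlan_port(void)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789, /* IANA-assigned VXLAN port */
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(0, &tunnel);
}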
+
#if EFSYS_OPT_RX_SCALE
static int
sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
@@ -1515,6 +1656,8 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
.flow_ctrl_get = sfc_flow_ctrl_get,
.flow_ctrl_set = sfc_flow_ctrl_set,
.mac_addr_set = sfc_mac_addr_set,
+ .udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del,
#if EFSYS_OPT_RX_SCALE
.reta_update = sfc_dev_rss_reta_update,
.reta_query = sfc_dev_rss_reta_query,
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index a16dc27b..7abe61ae 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_debug.h>
@@ -565,10 +543,8 @@ void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
- struct sfc_evq *mgmt_evq = sa->mgmt_evq;
-
- if (mgmt_evq->init_state == SFC_EVQ_STARTED)
- sfc_ev_qpoll(mgmt_evq);
+ if (sa->mgmt_evq_running)
+ sfc_ev_qpoll(sa->mgmt_evq);
rte_spinlock_unlock(&sa->mgmt_evq_lock);
}
@@ -734,20 +710,26 @@ sfc_ev_start(struct sfc_adapter *sa)
goto fail_ev_init;
/* Start management EVQ used for global events */
- rte_spinlock_lock(&sa->mgmt_evq_lock);
+ /*
+	 * Starting the management event queue polls it, but this cannot
+	 * interfere with other polling contexts since mgmt_evq_running
+	 * is still false.
+ */
rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
if (rc != 0)
goto fail_mgmt_evq_start;
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+ sa->mgmt_evq_running = true;
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
if (sa->intr.lsc_intr) {
rc = sfc_ev_qprime(sa->mgmt_evq);
if (rc != 0)
- goto fail_evq0_prime;
+ goto fail_mgmt_evq_prime;
}
- rte_spinlock_unlock(&sa->mgmt_evq_lock);
-
/*
* Start management EVQ polling. If interrupts are disabled
* (not used), it is required to process link status change
@@ -763,11 +745,10 @@ sfc_ev_start(struct sfc_adapter *sa)
return 0;
-fail_evq0_prime:
+fail_mgmt_evq_prime:
sfc_ev_qstop(sa->mgmt_evq);
fail_mgmt_evq_start:
- rte_spinlock_unlock(&sa->mgmt_evq_lock);
efx_ev_fini(sa->nic);
fail_ev_init:
@@ -783,9 +764,11 @@ sfc_ev_stop(struct sfc_adapter *sa)
sfc_ev_mgmt_periodic_qpoll_stop(sa);
rte_spinlock_lock(&sa->mgmt_evq_lock);
- sfc_ev_qstop(sa->mgmt_evq);
+ sa->mgmt_evq_running = false;
rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ sfc_ev_qstop(sa->mgmt_evq);
+
efx_ev_fini(sa->nic);
}
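
The restructuring above reduces to a small pattern: the spinlock guards only the mgmt_evq_running flag, while queue start/stop itself runs outside the lock so teardown never executes with the lock held. A condensed sketch, with struct example_adapter standing in for struct sfc_adapter:

#include <stdbool.h>
#include <rte_spinlock.h>

struct example_adapter {
	rte_spinlock_t mgmt_evq_lock;
	bool mgmt_evq_running;
};

/* Poller: does nothing unless the queue is marked running. */
static void
example_mgmt_qpoll(struct example_adapter *a)
{
	if (rte_spinlock_trylock(&a->mgmt_evq_lock)) {
		if (a->mgmt_evq_running) {
			/* sfc_ev_qpoll(a->mgmt_evq) would run here */
		}
		rte_spinlock_unlock(&a->mgmt_evq_lock);
	}
}

/* Stop: clear the flag under the lock, stop the queue outside it. */
static void
example_mgmt_stop(struct example_adapter *a)
{
	rte_spinlock_lock(&a->mgmt_evq_lock);
	a->mgmt_evq_running = false;
	rte_spinlock_unlock(&a->mgmt_evq_lock);
	/* sfc_ev_qstop(a->mgmt_evq) would run here */
}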
diff --git a/drivers/net/sfc/sfc_ev.h b/drivers/net/sfc/sfc_ev.h
index 065defe0..872f79b9 100644
--- a/drivers/net/sfc/sfc_ev.h
+++ b/drivers/net/sfc/sfc_ev.h
@@ -1,38 +1,16 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_EV_H_
#define _SFC_EV_H_
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "efx.h"
diff --git a/drivers/net/sfc/sfc_filter.c b/drivers/net/sfc/sfc_filter.c
index 58b74de7..77e2ea56 100644
--- a/drivers/net/sfc/sfc_filter.c
+++ b/drivers/net/sfc/sfc_filter.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2017 Solarflare Communications Inc.
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_common.h>
diff --git a/drivers/net/sfc/sfc_filter.h b/drivers/net/sfc/sfc_filter.h
index d884f37d..d3e1c2f9 100644
--- a/drivers/net/sfc/sfc_filter.h
+++ b/drivers/net/sfc/sfc_filter.h
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2017 Solarflare Communications Inc.
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_FILTER_H
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index f2050f65..93cdf8f4 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -1,37 +1,15 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2017 Solarflare Communications Inc.
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_tailq.h>
#include <rte_common.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
@@ -1021,7 +999,7 @@ fail_scale_tbl_set:
fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
- if (rss != NULL)
+ if (flow->rss)
efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
fail_scale_context_alloc:
@@ -1126,8 +1104,6 @@ sfc_flow_parse(struct rte_eth_dev *dev,
struct sfc_adapter *sa = dev->data->dev_private;
int rc;
- memset(&flow->spec, 0, sizeof(flow->spec));
-
rc = sfc_flow_parse_attr(attr, flow, error);
if (rc != 0)
goto fail_bad_value;
@@ -1160,6 +1136,8 @@ sfc_flow_validate(struct rte_eth_dev *dev,
{
struct rte_flow flow;
+ memset(&flow, 0, sizeof(flow));
+
return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}
diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
index aa740d7d..35472ad3 100644
--- a/drivers/net/sfc/sfc_flow.h
+++ b/drivers/net/sfc/sfc_flow.h
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2017 Solarflare Communications Inc.
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_FLOW_H
diff --git a/drivers/net/sfc/sfc_intr.c b/drivers/net/sfc/sfc_intr.c
index ee59cb1c..d6c84927 100644
--- a/drivers/net/sfc/sfc_intr.c
+++ b/drivers/net/sfc/sfc_intr.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
@@ -57,8 +35,9 @@ sfc_intr_handle_mgmt_evq(struct sfc_adapter *sa)
evq = sa->mgmt_evq;
- if (evq->init_state != SFC_EVQ_STARTED) {
- sfc_log_init(sa, "interrupt on stopped EVQ %u", evq->evq_index);
+ if (!sa->mgmt_evq_running) {
+ sfc_log_init(sa, "interrupt on not running management EVQ %u",
+ evq->evq_index);
} else {
sfc_ev_qpoll(evq);
@@ -112,7 +91,7 @@ exit:
"UP" : "DOWN");
_rte_eth_dev_callback_process(sa->eth_dev,
RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
}
@@ -154,7 +133,7 @@ exit:
sfc_info(sa, "link status change event");
_rte_eth_dev_callback_process(sa->eth_dev,
RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
}
diff --git a/drivers/net/sfc/sfc_kvargs.c b/drivers/net/sfc/sfc_kvargs.c
index 7bcd5951..8f55e99b 100644
--- a/drivers/net/sfc/sfc_kvargs.c
+++ b/drivers/net/sfc/sfc_kvargs.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdbool.h>
diff --git a/drivers/net/sfc/sfc_kvargs.h b/drivers/net/sfc/sfc_kvargs.h
index d9c3b1da..e7044ca6 100644
--- a/drivers/net/sfc/sfc_kvargs.h
+++ b/drivers/net/sfc/sfc_kvargs.h
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_KVARGS_H
diff --git a/drivers/net/sfc/sfc_log.h b/drivers/net/sfc/sfc_log.h
index b1a9df44..a18191ed 100644
--- a/drivers/net/sfc/sfc_log.h
+++ b/drivers/net/sfc/sfc_log.h
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_LOG_H_
diff --git a/drivers/net/sfc/sfc_mcdi.c b/drivers/net/sfc/sfc_mcdi.c
index 0faad3ed..9d92b8c5 100644
--- a/drivers/net/sfc/sfc_mcdi.c
+++ b/drivers/net/sfc/sfc_mcdi.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_cycles.h>
@@ -176,7 +154,7 @@ sfc_mcdi_exception(void *arg, efx_mcdi_exception_t eme)
(eme == EFX_MCDI_EXCEPTION_MC_REBOOT) ? "REBOOT" :
(eme == EFX_MCDI_EXCEPTION_MC_BADASSERT) ? "BADASSERT" : "UNKNOWN");
- sfc_panic(sa, "MCDI exceptions handling is not implemented\n");
+ sfc_schedule_restart(sa);
}
#define SFC_MCDI_LOG_BUF_SIZE 128
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index c1466a77..c423f527 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "efx.h"
@@ -188,10 +166,10 @@ sfc_port_start(struct sfc_adapter *sa)
goto fail_mac_pdu_set;
if (!port->isolated) {
- struct ether_addr *mac_addrs = sa->eth_dev->data->mac_addrs;
+ struct ether_addr *addr = &port->default_mac_addr;
sfc_log_init(sa, "set MAC address");
- rc = efx_mac_addr_set(sa->nic, mac_addrs[0].addr_bytes);
+ rc = efx_mac_addr_set(sa->nic, addr->addr_bytes);
if (rc != 0)
goto fail_mac_addr_set;
@@ -279,10 +257,10 @@ fail_port_init_dev_link:
(void)efx_mac_drain(sa->nic, B_TRUE);
fail_mac_drain:
+fail_mac_stats_upload:
(void)efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
0, B_FALSE);
-fail_mac_stats_upload:
fail_mac_stats_periodic:
fail_mcast_address_list_set:
fail_mac_filter_set:
@@ -321,11 +299,12 @@ sfc_port_configure(struct sfc_adapter *sa)
{
const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
struct sfc_port *port = &sa->port;
+ const struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
sfc_log_init(sa, "entry");
- if (dev_data->dev_conf.rxmode.jumbo_frame)
- port->pdu = dev_data->dev_conf.rxmode.max_rx_pkt_len;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ port->pdu = rxmode->max_rx_pkt_len;
else
port->pdu = EFX_MAC_PDU(dev_data->mtu);
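
A sketch of the PDU selection just above. EXAMPLE_RX_OFFLOAD_JUMBO_FRAME stands in for DEV_RX_OFFLOAD_JUMBO_FRAME, and EXAMPLE_ETH_OVERHEAD is an assumed constant approximating the framing overhead that EFX_MAC_PDU() adds on top of the MTU:

#include <stdint.h>

#define EXAMPLE_RX_OFFLOAD_JUMBO_FRAME (1ULL << 0) /* assumed bit value */
#define EXAMPLE_ETH_OVERHEAD 22 /* assumed L2 header + VLAN + FCS bytes */

/* Jumbo offload wins; otherwise derive the PDU from the MTU. */
static uint32_t
example_select_pdu(uint64_t rx_offloads, uint32_t max_rx_pkt_len,
		   uint16_t mtu)
{
	if (rx_offloads & EXAMPLE_RX_OFFLOAD_JUMBO_FRAME)
		return max_rx_pkt_len;
	return (uint32_t)mtu + EXAMPLE_ETH_OVERHEAD; /* ~EFX_MAC_PDU(mtu) */
}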
@@ -342,6 +321,8 @@ int
sfc_port_attach(struct sfc_adapter *sa)
{
struct sfc_port *port = &sa->port;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const struct ether_addr *from;
long kvarg_stats_update_period_ms;
int rc;
@@ -353,6 +334,10 @@ sfc_port_attach(struct sfc_adapter *sa)
port->flow_ctrl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
port->flow_ctrl_autoneg = B_TRUE;
+ RTE_BUILD_BUG_ON(sizeof(encp->enc_mac_addr) != sizeof(*from));
+ from = (const struct ether_addr *)(encp->enc_mac_addr);
+ ether_addr_copy(from, &port->default_mac_addr);
+
port->max_mcast_addrs = EFX_MAC_MULTICAST_LIST_MAX;
port->nb_mcast_addrs = 0;
port->mcast_addrs = rte_calloc_socket("mcast_addr_list_buf",
@@ -404,9 +389,14 @@ sfc_port_attach(struct sfc_adapter *sa)
return 0;
fail_kvarg_stats_update_period_ms:
+ sfc_dma_free(sa, &port->mac_stats_dma_mem);
+
fail_mac_stats_dma_alloc:
rte_free(port->mac_stats_buf);
+
fail_mac_stats_buf_alloc:
+ rte_free(port->mcast_addrs);
+
fail_mcast_addr_list_buf_alloc:
sfc_log_init(sa, "failed %d", rc);
return rc;
@@ -422,6 +412,8 @@ sfc_port_detach(struct sfc_adapter *sa)
sfc_dma_free(sa, &port->mac_stats_dma_mem);
rte_free(port->mac_stats_buf);
+ rte_free(port->mcast_addrs);
+
sfc_log_init(sa, "done");
}
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 7816393b..abc53fb5 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_mempool.h>
@@ -88,8 +66,7 @@ sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
struct rte_mbuf *m;
uint16_t port_id = rxq->dp.dpq.port_id;
- free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
- (added - rxq->completed);
+ free_space = rxq->max_fill_level - (added - rxq->completed);
if (free_space < rxq->refill_threshold)
return;
@@ -193,7 +170,7 @@ sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
}
static const uint32_t *
-sfc_efx_supported_ptypes_get(void)
+sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
@@ -413,6 +390,19 @@ sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
return rxq;
}
+static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
+static int
+sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ unsigned int *rxq_entries,
+ unsigned int *evq_entries,
+ unsigned int *rxq_max_fill_level)
+{
+ *rxq_entries = nb_rx_desc;
+ *evq_entries = nb_rx_desc;
+ *rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
+ return 0;
+}
+
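
For the EFX datapath the mapping is the identity, with the fill level capped by EFX_RXQ_LIMIT(). A standalone sketch; the 16-descriptor reservation is an assumption for illustration, the real value comes from efx.h:

#include <stdio.h>

#define EXAMPLE_RXQ_LIMIT(n) ((n) - 16) /* assumed reservation */

int
main(void)
{
	unsigned int nb_rx_desc = 512;          /* requested ring size */
	unsigned int rxq_entries = nb_rx_desc;  /* identity mapping */
	unsigned int evq_entries = nb_rx_desc;
	unsigned int max_fill = EXAMPLE_RXQ_LIMIT(rxq_entries); /* 496 */

	printf("rxq=%u evq=%u max_fill=%u\n", rxq_entries, evq_entries,
	       max_fill);
	return 0;
}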
static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
@@ -446,6 +436,7 @@ sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
rxq->ptr_mask = info->rxq_entries - 1;
rxq->batch_max = info->batch_max;
rxq->prefix_size = info->prefix_size;
+ rxq->max_fill_level = info->max_fill_level;
rxq->refill_threshold = info->refill_threshold;
rxq->buf_size = info->buf_size;
rxq->refill_mb_pool = info->refill_mb_pool;
@@ -535,6 +526,7 @@ struct sfc_dp_rx sfc_efx_rx = {
.hw_fw_caps = 0,
},
.features = SFC_DP_RX_FEAT_SCATTER,
+ .qsize_up_rings = sfc_efx_rx_qsize_up_rings,
.qcreate = sfc_efx_rx_qcreate,
.qdestroy = sfc_efx_rx_qdestroy,
.qstart = sfc_efx_rx_qstart,
@@ -697,8 +689,8 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
&rxq->mem, rxq_info->entries,
- 0 /* not used on EF10 */, evq->common,
- &rxq->common);
+ 0 /* not used on EF10 */, rxq_info->type_flags,
+ evq->common, &rxq->common);
if (rc != 0)
goto fail_rx_qcreate;
@@ -770,11 +762,81 @@ sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
sfc_ev_qstop(rxq->evq);
}
+uint64_t
+sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t caps = 0;
+
+ caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ caps |= DEV_RX_OFFLOAD_CRC_STRIP;
+ caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+ caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
+ caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ if (encp->enc_tunnel_encapsulations_supported &&
+ (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+ caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+ return caps;
+}
+
+uint64_t
+sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
+{
+ uint64_t caps = 0;
+
+ if (sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
+ caps |= DEV_RX_OFFLOAD_SCATTER;
+
+ return caps;
+}
+
+static void
+sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
+ const char *verdict, uint64_t offloads)
+{
+ unsigned long long bit;
+
+ while ((bit = __builtin_ffsll(offloads)) != 0) {
+ uint64_t flag = (1ULL << --bit);
+
+ sfc_err(sa, "Rx %s offload %s %s", offload_group,
+ rte_eth_dev_rx_offload_name(flag), verdict);
+
+ offloads &= ~flag;
+ }
+}
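
The helper above walks a bitmask lowest-set-bit first via __builtin_ffsll(), which returns the 1-based index of the least significant set bit (0 when none is set). A standalone sketch of the same loop:

#include <stdio.h>
#include <stdint.h>

static void
example_walk_bits(uint64_t offloads)
{
	unsigned long long bit;

	while ((bit = __builtin_ffsll(offloads)) != 0) {
		uint64_t flag = (1ULL << --bit); /* ffsll() is 1-based */

		printf("bit %llu is set\n", bit);
		offloads &= ~flag; /* clear it so the loop terminates */
	}
}

int
main(void)
{
	example_walk_bits(0x15); /* prints bits 0, 2 and 4 */
	return 0;
}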
+
+static boolean_t
+sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
+{
+ uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
+ sfc_rx_get_queue_offload_caps(sa);
+ uint64_t rejected = requested & ~supported;
+ uint64_t missing = (requested & mandatory) ^ mandatory;
+ boolean_t mismatch = B_FALSE;
+
+ if (rejected) {
+ sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
+ mismatch = B_TRUE;
+ }
+
+ if (missing) {
+ sfc_rx_log_offloads(sa, "queue", "must be set", missing);
+ mismatch = B_TRUE;
+ }
+
+ return mismatch;
+}
+
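
The two derived masks in the check above are plain bit algebra: rejected = requested & ~supported, and missing = (requested & mandatory) ^ mandatory, i.e. the mandatory bits absent from the request. A worked sketch with arbitrary example masks:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t supported = 0x07; /* bits 0..2 offered */
	uint64_t mandatory = 0x05; /* bits 0 and 2 must be requested */
	uint64_t requested = 0x0a; /* bits 1 and 3 requested */

	uint64_t rejected = requested & ~supported;             /* 0x08 */
	uint64_t missing = (requested & mandatory) ^ mandatory; /* 0x05 */

	printf("rejected=%#llx missing=%#llx\n",
	       (unsigned long long)rejected, (unsigned long long)missing);
	return 0;
}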
static int
-sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
+sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
const struct rte_eth_rxconf *rx_conf)
{
- const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);
+ uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
+ sfc_rx_get_queue_offload_caps(sa);
int rc = 0;
if (rx_conf->rx_thresh.pthresh != 0 ||
@@ -784,10 +846,10 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
"RxQ prefetch/host/writeback thresholds are not supported");
}
- if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
+ if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
sfc_err(sa,
"RxQ free threshold too large: %u vs maximum %u",
- rx_conf->rx_free_thresh, rx_free_thresh_max);
+ rx_conf->rx_free_thresh, rxq_max_fill_level);
rc = EINVAL;
}
@@ -796,6 +858,17 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
rc = EINVAL;
}
+ if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+ DEV_RX_OFFLOAD_CHECKSUM)
+ sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
+
+ if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+ (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
+
+ if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
+ rc = EINVAL;
+
return rc;
}
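
The checksum warning above relies on DEV_RX_OFFLOAD_CHECKSUM being the union of the IPv4/UDP/TCP checksum bits, so the comparison fires whenever the set is only partially requested. A sketch with assumed stand-in bit values:

#include <stdio.h>
#include <stdint.h>

/* Assumed stand-ins for the DEV_RX_OFFLOAD_* checksum bits. */
#define EX_IPV4_CKSUM (1ULL << 0)
#define EX_UDP_CKSUM  (1ULL << 1)
#define EX_TCP_CKSUM  (1ULL << 2)
#define EX_CHECKSUM   (EX_IPV4_CKSUM | EX_UDP_CKSUM | EX_TCP_CKSUM)

int
main(void)
{
	uint64_t offloads = EX_IPV4_CKSUM | EX_TCP_CKSUM; /* UDP left out */

	/* True unless all three checksum bits are requested together. */
	if ((offloads & EX_CHECKSUM) != EX_CHECKSUM)
		printf("checksum offloads cannot be partially disabled\n");
	return 0;
}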
@@ -907,13 +980,25 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
int rc;
+ unsigned int rxq_entries;
+ unsigned int evq_entries;
+ unsigned int rxq_max_fill_level;
uint16_t buf_size;
struct sfc_rxq_info *rxq_info;
struct sfc_evq *evq;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
- rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
+ rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, &rxq_entries, &evq_entries,
+ &rxq_max_fill_level);
+ if (rc != 0)
+ goto fail_size_up_rings;
+ SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
+ SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
+ SFC_ASSERT(rxq_entries >= nb_rx_desc);
+ SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
+
+ rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
if (rc != 0)
goto fail_bad_conf;
@@ -926,7 +1011,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
}
if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
- !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
+ (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
"object size is too small", sw_index);
sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -940,14 +1025,19 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
SFC_ASSERT(sw_index < sa->rxq_count);
rxq_info = &sa->rxq_info[sw_index];
- SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
- rxq_info->entries = nb_rx_desc;
- rxq_info->type =
- sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
- EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;
+ SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
+ rxq_info->entries = rxq_entries;
+ rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
+ rxq_info->type_flags =
+ (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+ EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
+
+ if ((encp->enc_tunnel_encapsulations_supported != 0) &&
+ (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
+ rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
- rxq_info->entries, socket_id, &evq);
+ evq_entries, socket_id, &evq);
if (rc != 0)
goto fail_ev_qinit;
@@ -972,6 +1062,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
memset(&info, 0, sizeof(info));
info.refill_mb_pool = rxq->refill_mb_pool;
+ info.max_fill_level = rxq_max_fill_level;
info.refill_threshold = rxq->refill_threshold;
info.buf_size = buf_size;
info.batch_max = encp->enc_rx_batch_max;
@@ -984,7 +1075,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.rxq_entries = rxq_info->entries;
info.rxq_hw_ring = rxq->mem.esm_base;
- info.evq_entries = rxq_info->entries;
+ info.evq_entries = evq_entries;
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = rxq->hw_index;
info.mem_bar = sa->mem_bar.esb_base;
@@ -1017,6 +1108,7 @@ fail_ev_qinit:
rxq_info->entries = 0;
fail_bad_conf:
+fail_size_up_rings:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
@@ -1200,6 +1292,9 @@ sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
+ uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
+ sfc_rx_get_queue_offload_caps(sa);
+ uint64_t offloads_rejected = rxmode->offloads & ~offloads_supported;
int rc = 0;
switch (rxmode->mq_mode) {
@@ -1220,45 +1315,18 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
rc = EINVAL;
}
- if (rxmode->header_split) {
- sfc_err(sa, "Header split on Rx not supported");
- rc = EINVAL;
- }
-
- if (rxmode->hw_vlan_filter) {
- sfc_err(sa, "HW VLAN filtering not supported");
- rc = EINVAL;
- }
-
- if (rxmode->hw_vlan_strip) {
- sfc_err(sa, "HW VLAN stripping not supported");
- rc = EINVAL;
- }
-
- if (rxmode->hw_vlan_extend) {
- sfc_err(sa,
- "Q-in-Q HW VLAN stripping not supported");
+ if (offloads_rejected) {
+ sfc_rx_log_offloads(sa, "device", "is unsupported",
+ offloads_rejected);
rc = EINVAL;
}
- if (!rxmode->hw_strip_crc) {
- sfc_warn(sa,
- "FCS stripping control not supported - always stripped");
+ if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+ sfc_warn(sa, "FCS stripping cannot be disabled - always on");
+ rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
rxmode->hw_strip_crc = 1;
}
- if (rxmode->enable_scatter &&
- (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
- sfc_err(sa, "Rx scatter not supported by %s datapath",
- sa->dp_rx->dp.name);
- rc = EINVAL;
- }
-
- if (rxmode->enable_lro) {
- sfc_err(sa, "LRO not supported");
- rc = EINVAL;
- }
-
return rc;
}
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 9e6282ea..6706ee6f 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_RX_H
@@ -34,7 +12,7 @@
#include <rte_mbuf.h>
#include <rte_mempool.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "efx.h"
@@ -121,6 +99,7 @@ struct sfc_efx_rxq {
/* Used on refill */
unsigned int added;
unsigned int pushed;
+ unsigned int max_fill_level;
unsigned int refill_threshold;
uint16_t buf_size;
struct rte_mempool *refill_mb_pool;
@@ -144,6 +123,7 @@ struct sfc_rxq_info {
unsigned int max_entries;
unsigned int entries;
efx_rxq_type_t type;
+ unsigned int type_flags;
struct sfc_rxq *rxq;
boolean_t deferred_start;
boolean_t deferred_started;
@@ -162,6 +142,9 @@ void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+uint64_t sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa);
+uint64_t sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa);
+
void sfc_rx_qflush_done(struct sfc_rxq *rxq);
void sfc_rx_qflush_failed(struct sfc_rxq *rxq);
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index 2e7b595b..ba8496df 100644
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_ip.h>
diff --git a/drivers/net/sfc/sfc_tweak.h b/drivers/net/sfc/sfc_tweak.h
index fd2f75c3..b4026851 100644
--- a/drivers/net/sfc/sfc_tweak.h
+++ b/drivers/net/sfc/sfc_tweak.h
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_TWEAK_H_
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index d1320f46..757b03ba 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "sfc.h"
@@ -56,12 +34,88 @@
*/
#define SFC_TX_QFLUSH_POLL_ATTEMPTS (2000)
+uint64_t
+sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t caps = 0;
+
+ if ((sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
+ encp->enc_hw_tx_insert_vlan_enabled)
+ caps |= DEV_TX_OFFLOAD_VLAN_INSERT;
+
+ if (sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
+ caps |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) &&
+ (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT))
+ caps |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ return caps;
+}
+
+uint64_t
+sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t caps = 0;
+
+ caps |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ caps |= DEV_TX_OFFLOAD_UDP_CKSUM;
+ caps |= DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if (encp->enc_tunnel_encapsulations_supported)
+ caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+ if (sa->tso)
+ caps |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ return caps;
+}
+
+static void
+sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
+ const char *verdict, uint64_t offloads)
+{
+ unsigned long long bit;
+
+ while ((bit = __builtin_ffsll(offloads)) != 0) {
+ uint64_t flag = (1ULL << --bit);
+
+ sfc_err(sa, "Tx %s offload %s %s", offload_group,
+ rte_eth_dev_tx_offload_name(flag), verdict);
+
+ offloads &= ~flag;
+ }
+}
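
The __builtin_ffsll() loop above visits each set offload bit exactly once: ffsll() returns the 1-based position of the least significant set bit, which is turned back into a flag, reported, and cleared. A minimal standalone sketch of the same walk (names illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Print every set bit in a 64-bit offload mask, least significant first. */
static void walk_offload_bits(uint64_t offloads)
{
	unsigned long long bit;

	while ((bit = __builtin_ffsll(offloads)) != 0) {
		uint64_t flag = (1ULL << --bit);	/* 1-based index -> flag */

		printf("offload bit %llu (0x%llx)\n", bit,
		       (unsigned long long)flag);
		offloads &= ~flag;	/* clear the reported bit */
	}
}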
+
+static int
+sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested)
+{
+ uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads;
+ uint64_t supported = sfc_tx_get_dev_offload_caps(sa) |
+ sfc_tx_get_queue_offload_caps(sa);
+ uint64_t rejected = requested & ~supported;
+ uint64_t missing = (requested & mandatory) ^ mandatory;
+ boolean_t mismatch = B_FALSE;
+
+ if (rejected) {
+ sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected);
+ mismatch = B_TRUE;
+ }
+
+ if (missing) {
+ sfc_tx_log_offloads(sa, "queue", "must be set", missing);
+ mismatch = B_TRUE;
+ }
+
+ return mismatch;
+}
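
The two masks computed above are plain bit arithmetic: rejected keeps every requested bit outside the supported set, while missing keeps every mandatory port-level bit the queue request omitted ((requested & mandatory) ^ mandatory equals mandatory & ~requested). A worked example with made-up flag values, not the real DEV_TX_OFFLOAD_* bits:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t supported = 0x07; /* bits 0..2 are offered */
	const uint64_t mandatory = 0x01; /* bit 0 is set port-wide */
	const uint64_t requested = 0x0a; /* queue asks for bits 1 and 3 */

	uint64_t rejected = requested & ~supported;             /* 0x08 */
	uint64_t missing = (requested & mandatory) ^ mandatory; /* 0x01 */

	assert(rejected == 0x08);                    /* bit 3 is unsupported */
	assert(missing == (mandatory & ~requested)); /* equivalent form */
	return 0;
}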
+
static int
-sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
+sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
const struct rte_eth_txconf *tx_conf)
{
- unsigned int flags = tx_conf->txq_flags;
- const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
int rc = 0;
if (tx_conf->tx_rs_thresh != 0) {
@@ -69,10 +123,10 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
rc = EINVAL;
}
- if (tx_conf->tx_free_thresh > EFX_TXQ_LIMIT(nb_tx_desc)) {
+ if (tx_conf->tx_free_thresh > txq_max_fill_level) {
sfc_err(sa,
"TxQ free threshold too large: %u vs maximum %u",
- tx_conf->tx_free_thresh, EFX_TXQ_LIMIT(nb_tx_desc));
+ tx_conf->tx_free_thresh, txq_max_fill_level);
rc = EINVAL;
}
@@ -83,52 +137,16 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
"prefetch/host/writeback thresholds are not supported");
}
- if (((flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) &&
- (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)) {
- sfc_err(sa, "Multi-segment is not supported by %s datapath",
- sa->dp_tx->dp.name);
- rc = EINVAL;
- }
-
- if (((flags & ETH_TXQ_FLAGS_NOMULTMEMP) == 0) &&
- (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)) {
- sfc_err(sa, "multi-mempool is not supported by %s datapath",
- sa->dp_tx->dp.name);
- rc = EINVAL;
- }
-
- if (((flags & ETH_TXQ_FLAGS_NOREFCOUNT) == 0) &&
- (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)) {
- sfc_err(sa,
- "mbuf reference counters are neglected by %s datapath",
- sa->dp_tx->dp.name);
- rc = EINVAL;
- }
-
- if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
- if (!encp->enc_hw_tx_insert_vlan_enabled) {
- sfc_err(sa, "VLAN offload is not supported");
- rc = EINVAL;
- } else if (~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) {
- sfc_err(sa,
- "VLAN offload is not supported by %s datapath",
- sa->dp_tx->dp.name);
- rc = EINVAL;
- }
- }
-
- if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
- sfc_err(sa, "SCTP offload is not supported");
- rc = EINVAL;
- }
-
/* We either perform both TCP and UDP offload, or no offload at all */
- if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
- ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
+ if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+ ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
sfc_err(sa, "TCP and UDP offloads can't be set independently");
rc = EINVAL;
}
+ if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads))
+ rc = EINVAL;
+
return rc;
}
@@ -145,6 +163,9 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
const struct rte_eth_txconf *tx_conf)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ unsigned int txq_entries;
+ unsigned int evq_entries;
+ unsigned int txq_max_fill_level;
struct sfc_txq_info *txq_info;
struct sfc_evq *evq;
struct sfc_txq *txq;
@@ -153,18 +174,26 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
sfc_log_init(sa, "TxQ = %u", sw_index);
- rc = sfc_tx_qcheck_conf(sa, nb_tx_desc, tx_conf);
+ rc = sa->dp_tx->qsize_up_rings(nb_tx_desc, &txq_entries, &evq_entries,
+ &txq_max_fill_level);
+ if (rc != 0)
+ goto fail_size_up_rings;
+ SFC_ASSERT(txq_entries >= EFX_TXQ_MINNDESCS);
+ SFC_ASSERT(txq_entries <= sa->txq_max_entries);
+ SFC_ASSERT(txq_entries >= nb_tx_desc);
+ SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
+
+ rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
if (rc != 0)
goto fail_bad_conf;
SFC_ASSERT(sw_index < sa->txq_count);
txq_info = &sa->txq_info[sw_index];
- SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
- txq_info->entries = nb_tx_desc;
+ txq_info->entries = txq_entries;
rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
- txq_info->entries, socket_id, &evq);
+ evq_entries, socket_id, &evq);
if (rc != 0)
goto fail_ev_qinit;
@@ -181,6 +210,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
SFC_TX_DEFAULT_FREE_THRESH;
txq->flags = tx_conf->txq_flags;
+ txq->offloads = tx_conf->offloads;
rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
socket_id, &txq->mem);
@@ -188,12 +218,14 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
goto fail_dma_alloc;
memset(&info, 0, sizeof(info));
+ info.max_fill_level = txq_max_fill_level;
info.free_thresh = txq->free_thresh;
info.flags = tx_conf->txq_flags;
+ info.offloads = tx_conf->offloads;
info.txq_entries = txq_info->entries;
info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
info.txq_hw_ring = txq->mem.esm_base;
- info.evq_entries = txq_info->entries;
+ info.evq_entries = evq_entries;
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = txq->hw_index;
info.mem_bar = sa->mem_bar.esb_base;
@@ -226,6 +258,7 @@ fail_ev_qinit:
txq_info->entries = 0;
fail_bad_conf:
+fail_size_up_rings:
sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
return rc;
}
@@ -270,6 +303,9 @@ sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
static int
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
{
+ uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
+ sfc_tx_get_queue_offload_caps(sa);
+ uint64_t offloads_rejected = txmode->offloads & ~offloads_supported;
int rc = 0;
switch (txmode->mq_mode) {
@@ -300,6 +336,12 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
rc = EINVAL;
}
+ if (offloads_rejected) {
+ sfc_tx_log_offloads(sa, "device", "is unsupported",
+ offloads_rejected);
+ rc = EINVAL;
+ }
+
return rc;
}
@@ -410,11 +452,13 @@ sfc_tx_close(struct sfc_adapter *sa)
int
sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
+ uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
+ sfc_tx_get_queue_offload_caps(sa);
struct rte_eth_dev_data *dev_data;
struct sfc_txq_info *txq_info;
struct sfc_txq *txq;
struct sfc_evq *evq;
- uint16_t flags;
+ uint16_t flags = 0;
unsigned int desc_index;
int rc = 0;
@@ -434,19 +478,40 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
goto fail_ev_qstart;
/*
- * It seems that DPDK has no controls regarding IPv4 offloads,
- * hence, we always enable it here
+ * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
+ * application which expects that IPv4 checksum offload is enabled
+ * all the time as there is no legacy flag to turn off the offload.
*/
- if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
- (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP)) {
- flags = EFX_TXQ_CKSUM_IPV4;
- } else {
- flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
-
- if (sa->tso)
- flags |= EFX_TXQ_FATSOV2;
+ if ((txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ||
+ (~txq->flags & ETH_TXQ_FLAGS_IGNORE))
+ flags |= EFX_TXQ_CKSUM_IPV4;
+
+ if ((txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+ ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
+ (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)))
+ flags |= EFX_TXQ_CKSUM_INNER_IPV4;
+
+ if ((txq->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+ (txq->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+ flags |= EFX_TXQ_CKSUM_TCPUDP;
+
+ if ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
+ (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+ flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
}
+ /*
+ * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
+ * application. In turn, the absence of ETH_TXQ_FLAGS_NOXSUMTCP is
+ * associated specifically with a legacy application which expects
+ * both TCP checksum offload and TSO to be enabled because the legacy
+ * API does not provide a dedicated mechanism to control TSO.
+ */
+ if ((txq->offloads & DEV_TX_OFFLOAD_TCP_TSO) ||
+ ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
+ (~txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP)))
+ flags |= EFX_TXQ_FATSOV2;
+
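Condensing the mapping above: a queue configured through the legacy API (no ETH_TXQ_FLAGS_IGNORE) always gets IPv4 checksum offload, and gets FATSOv2 unless it opted out of TCP checksums, since the old API had no dedicated TSO switch. A standalone sketch of the TSO decision, using stand-in constants rather than the real ETH_TXQ_FLAGS_* and DEV_TX_OFFLOAD_* values:

#include <stdint.h>

#define TXQ_FLAGS_IGNORE    0x8000 /* stand-in for ETH_TXQ_FLAGS_IGNORE */
#define TXQ_FLAGS_NOXSUMTCP 0x0400 /* stand-in for ETH_TXQ_FLAGS_NOXSUMTCP */
#define OFFLOAD_TCP_TSO     0x0020 /* stand-in for DEV_TX_OFFLOAD_TCP_TSO */

/* Nonzero when FATSOv2 should be enabled on the hardware queue. */
static int want_fatso(uint32_t txq_flags, uint64_t offloads)
{
	int legacy = !(txq_flags & TXQ_FLAGS_IGNORE);

	/* New API: follow the explicit TSO offload bit.
	 * Legacy API: TSO rides along with TCP checksum offload. */
	return (offloads & OFFLOAD_TCP_TSO) ||
	       (legacy && !(txq_flags & TXQ_FLAGS_NOXSUMTCP));
}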
rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
txq_info->entries, 0 /* not used on EF10 */,
flags, evq->common,
@@ -678,7 +743,7 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
unsigned int pushed = added;
unsigned int pkts_sent = 0;
efx_desc_t *pend = &txq->pend_desc[0];
- const unsigned int hard_max_fill = EFX_TXQ_LIMIT(txq->ptr_mask + 1);
+ const unsigned int hard_max_fill = txq->max_fill_level;
const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
unsigned int fill_level = added - txq->completed;
boolean_t reap_done;
@@ -714,9 +779,9 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/*
* Here VLAN TCI is expected to be zero if no
- * DEV_TX_VLAN_OFFLOAD capability is advertised;
+ * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
* if the calling app ignores the absence of
- * DEV_TX_VLAN_OFFLOAD and pushes VLAN TCI, then
+ * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
* TX_ERROR will occur
*/
pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
@@ -865,6 +930,19 @@ sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
return txq;
}
+static sfc_dp_tx_qsize_up_rings_t sfc_efx_tx_qsize_up_rings;
+static int
+sfc_efx_tx_qsize_up_rings(uint16_t nb_tx_desc,
+ unsigned int *txq_entries,
+ unsigned int *evq_entries,
+ unsigned int *txq_max_fill_level)
+{
+ *txq_entries = nb_tx_desc;
+ *evq_entries = nb_tx_desc;
+ *txq_max_fill_level = EFX_TXQ_LIMIT(*txq_entries);
+ return 0;
+}
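
Each datapath's qsize_up_rings() callback rounds the requested descriptor count up to whatever its rings require; the EFX variant keeps the counts unchanged and caps the fill level with EFX_TXQ_LIMIT(), which reserves a few descriptors so the ring is never driven completely full. A sketch of the contract sfc_tx_qinit() asserts on the outputs (parameter names illustrative):

#include <assert.h>

/* Mirrors the SFC_ASSERT checks after the qsize_up_rings() callback. */
static void check_qsize_up_rings(unsigned int nb_tx_desc,
				 unsigned int txq_entries,
				 unsigned int txq_max_fill_level,
				 unsigned int txq_max_entries)
{
	assert(txq_entries >= nb_tx_desc);        /* ring holds the request */
	assert(txq_entries <= txq_max_entries);   /* within the adapter limit */
	assert(txq_max_fill_level <= nb_tx_desc); /* never overfill the ring */
}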
+
static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate;
static int
sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
@@ -911,6 +989,7 @@ sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
txq->evq = ctrl_txq->evq;
txq->ptr_mask = info->txq_entries - 1;
+ txq->max_fill_level = info->max_fill_level;
txq->free_thresh = info->free_thresh;
txq->dma_desc_size_max = info->dma_desc_size_max;
@@ -1000,7 +1079,7 @@ sfc_efx_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
if (unlikely(offset > txq->ptr_mask))
return -EINVAL;
- if (unlikely(offset >= EFX_TXQ_LIMIT(txq->ptr_mask + 1)))
+ if (unlikely(offset >= txq->max_fill_level))
return RTE_ETH_TX_DESC_UNAVAIL;
/*
@@ -1040,6 +1119,7 @@ struct sfc_dp_tx sfc_efx_tx = {
SFC_DP_TX_FEAT_MULTI_POOL |
SFC_DP_TX_FEAT_REFCNT |
SFC_DP_TX_FEAT_MULTI_SEG,
+ .qsize_up_rings = sfc_efx_tx_qsize_up_rings,
.qcreate = sfc_efx_tx_qcreate,
.qdestroy = sfc_efx_tx_qdestroy,
.qstart = sfc_efx_tx_qstart,
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index 0c1c7083..c2e5f13e 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -1,39 +1,17 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SFC_TX_H
#define _SFC_TX_H
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include "efx.h"
@@ -81,6 +59,7 @@ struct sfc_txq {
efx_txq_t *common;
unsigned int free_thresh;
unsigned int flags;
+ uint64_t offloads;
};
static inline unsigned int
@@ -110,6 +89,7 @@ struct sfc_efx_txq {
unsigned int added;
unsigned int pending;
unsigned int completed;
+ unsigned int max_fill_level;
unsigned int free_thresh;
uint16_t hw_vlan_tci;
uint16_t dma_desc_size_max;
@@ -150,6 +130,9 @@ void sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
int sfc_tx_start(struct sfc_adapter *sa);
void sfc_tx_stop(struct sfc_adapter *sa);
+uint64_t sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa);
+uint64_t sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa);
+
/* From 'sfc_tso.c' */
int sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
unsigned int txq_entries,
diff --git a/drivers/net/softnic/Makefile b/drivers/net/softnic/Makefile
index 09ed62ea..d56fecd6 100644
--- a/drivers/net/softnic/Makefile
+++ b/drivers/net/softnic/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index 3e47c2f9..b0c13415 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -1,41 +1,12 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
@@ -551,7 +522,7 @@ pmd_ethdev_register(struct rte_vdev_device *vdev,
soft_dev->data->dev_private = dev_private;
soft_dev->data->dev_link.link_speed = hard_speed;
soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- soft_dev->data->dev_link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ soft_dev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
soft_dev->data->mac_addrs = &eth_addr;
soft_dev->data->promiscuous = 1;
diff --git a/drivers/net/softnic/rte_eth_softnic.h b/drivers/net/softnic/rte_eth_softnic.h
index b49e5829..9a2c7ba9 100644
--- a/drivers/net/softnic/rte_eth_softnic.h
+++ b/drivers/net/softnic/rte_eth_softnic.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#ifndef __INCLUDE_RTE_ETH_SOFTNIC_H__
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index 1f758069..050e3e7e 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
@@ -38,7 +9,7 @@
#include <rte_mbuf.h>
#include <rte_sched.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_tm_driver.h>
#include "rte_eth_softnic.h"
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index a4599006..79f1c6a8 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <stdint.h>
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 74f151c4..e53c738d 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -44,7 +44,7 @@
#include <libsze2.h>
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -1214,7 +1214,7 @@ eth_link_update(struct rte_eth_dev *dev,
link.link_status = (link_is_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
- link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ link.link_autoneg = ETH_LINK_FIXED;
rte_atomic64_cmpset((uint64_t *)dev_link, *(uint64_t *)dev_link,
*(uint64_t *)link_ptr);
@@ -1553,7 +1553,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
close(fd);
- if (pci_resource_ptr == NULL) {
+ if (pci_resource_ptr == MAP_FAILED) {
RTE_LOG(ERR, PMD, "Could not mmap file %s (fd = %d)\n",
rsc_filename, fd);
return -EINVAL;
diff --git a/drivers/net/tap/Makefile b/drivers/net/tap/Makefile
index 405b49e4..ccc5c5fc 100644
--- a/drivers/net/tap/Makefile
+++ b/drivers/net/tap/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
@@ -39,6 +12,12 @@ EXPORT_MAP := rte_pmd_tap_version.map
LIBABIVER := 1
+#
+# TAP_MAX_QUEUES must be a power of 2
+#
+ifeq ($(TAP_MAX_QUEUES),)
+ TAP_MAX_QUEUES = 16
+endif
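
TAP_MAX_QUEUES can be overridden on the make command line (for example, make TAP_MAX_QUEUES=32). The power-of-two requirement is not enforced by the Makefile itself; a hypothetical C11 compile-time guard in the driver sources could look like:

#include <assert.h>

/* Hypothetical guard, not in the driver: reject non-power-of-two values. */
static_assert((TAP_MAX_QUEUES & (TAP_MAX_QUEUES - 1)) == 0,
	      "TAP_MAX_QUEUES must be a power of 2");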
CFLAGS += -O3
CFLAGS += -I$(SRCDIR)
CFLAGS += -I.
@@ -47,6 +26,8 @@ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
LDLIBS += -lrte_bus_vdev
+CFLAGS += -DTAP_MAX_QUEUES=$(TAP_MAX_QUEUES)
+
#
# all source are stored in SRCS-y
#
@@ -54,6 +35,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += rte_eth_tap.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_netlink.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_tcmsgs.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_bpf_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_intr.c
include $(RTE_SDK)/mk/rte.lib.mk
@@ -80,6 +63,26 @@ tap_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
linux/pkt_cls.h \
enum TCA_FLOWER_KEY_VLAN_PRIO \
$(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_BPF \
+ linux/pkt_cls.h \
+ enum TCA_BPF_UNSPEC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_BPF_FD \
+ linux/pkt_cls.h \
+ enum TCA_BPF_FD \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_ACT_BPF \
+ linux/tc_act/tc_bpf.h \
+ enum TCA_ACT_BPF_UNSPEC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_ACT_BPF_FD \
+ linux/tc_act/tc_bpf.h \
+ enum TCA_ACT_BPF_FD \
+ $(AUTOCONF_OUTPUT)
# Create tap_autoconf.h or update it in case it differs from the new one.
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 6b27679a..f09db0ea 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <rte_atomic.h>
@@ -36,7 +7,7 @@
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
@@ -53,6 +24,7 @@
#include <sys/mman.h>
#include <errno.h>
#include <signal.h>
+#include <stdbool.h>
#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>
@@ -60,7 +32,6 @@
#include <net/if.h>
#include <linux/if_tun.h>
#include <linux/if_ether.h>
-#include <linux/version.h>
#include <fcntl.h>
#include <rte_eth_tap.h>
@@ -73,19 +44,14 @@
#define DEFAULT_TAP_NAME "dtap"
#define ETH_TAP_IFACE_ARG "iface"
-#define ETH_TAP_SPEED_ARG "speed"
#define ETH_TAP_REMOTE_ARG "remote"
#define ETH_TAP_MAC_ARG "mac"
#define ETH_TAP_MAC_FIXED "fixed"
-#define FLOWER_KERNEL_VERSION KERNEL_VERSION(4, 2, 0)
-#define FLOWER_VLAN_KERNEL_VERSION KERNEL_VERSION(4, 9, 0)
-
static struct rte_vdev_driver pmd_tap_drv;
static const char *valid_arguments[] = {
ETH_TAP_IFACE_ARG,
- ETH_TAP_SPEED_ARG,
ETH_TAP_REMOTE_ARG,
ETH_TAP_MAC_ARG,
NULL
@@ -99,7 +65,7 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_SPEED_AUTONEG
+ .link_autoneg = ETH_LINK_AUTONEG
};
static void
@@ -285,6 +251,45 @@ tap_verify_csum(struct rte_mbuf *mbuf)
}
}
+static uint64_t
+tap_rx_offload_get_port_capa(void)
+{
+ /*
+ * In order to support legacy apps, also report
+ * queue capabilities as port capabilities.
+ */
+ return DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP;
+}
+
+static uint64_t
+tap_rx_offload_get_queue_capa(void)
+{
+ return DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP;
+}
+
+static bool
+tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
+{
+ uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
+ uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
+ uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();
+
+ if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
+ offloads)
+ return false;
+ if ((port_offloads ^ offloads) & port_supp_offloads)
+ return false;
+ return true;
+}
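
The check above encodes two rules: a queue may only request offloads that some capability set covers, and on every port-level offload the queue must agree with the port configuration ((port_offloads ^ offloads) & port_supp_offloads is nonzero exactly when they differ on a port-owned bit). A worked example with illustrative masks, not the real DEV_RX_OFFLOAD_* values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool offloads_valid(uint64_t offloads, uint64_t port_offloads,
			   uint64_t queue_supp, uint64_t port_supp)
{
	if ((offloads & (queue_supp | port_supp)) != offloads)
		return false; /* requested an unknown offload */
	if ((port_offloads ^ offloads) & port_supp)
		return false; /* disagrees with the port on a port bit */
	return true;
}

int main(void)
{
	assert(offloads_valid(0x3, 0x3, 0x1, 0x3));  /* matches the port */
	assert(!offloads_valid(0x1, 0x3, 0x1, 0x3)); /* drops a port bit */
	assert(!offloads_valid(0x7, 0x3, 0x1, 0x3)); /* unknown bit 2 */
	return 0;
}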
+
/* Callback to handle the rx burst of packets to the correct interface and
* file descriptor(s) in a multi-queue setup.
*/
@@ -309,8 +314,9 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
int len;
len = readv(rxq->fd, *rxq->iovecs,
- 1 + (rxq->rxmode->enable_scatter ?
- rxq->nb_rx_desc : 1));
+ 1 +
+ (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+ rxq->nb_rx_desc : 1));
if (len < (int)sizeof(struct tun_pi))
break;
@@ -365,7 +371,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
seg->next = NULL;
mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
RTE_PTYPE_ALL_MASK);
- if (rxq->rxmode->hw_ip_checksum)
+ if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
tap_verify_csum(mbuf);
/* account for the receive frame */
@@ -379,6 +385,44 @@ end:
return num_rx;
}
+static uint64_t
+tap_tx_offload_get_port_capa(void)
+{
+ /*
+ * In order to support legacy apps, also report
+ * queue capabilities as port capabilities.
+ */
+ return DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+}
+
+static uint64_t
+tap_tx_offload_get_queue_capa(void)
+{
+ return DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+}
+
+static bool
+tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
+{
+ uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
+ uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
+
+ if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
+ offloads)
+ return false;
+ /* Verify we have no conflict with port offloads */
+ if ((port_offloads ^ offloads) & port_supp_offloads)
+ return false;
+ return true;
+}
+
static void
tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
unsigned int l3_len)
@@ -465,9 +509,10 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
rte_pktmbuf_mtod(seg, void *);
seg = seg->next;
}
- if (mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
- (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
- (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) {
+ if (txq->csum &&
+ ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
+ (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
+ (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
/* Support only packets with all data in the same seg */
if (mbuf->nb_segs > 1)
break;
@@ -605,6 +650,17 @@ tap_dev_stop(struct rte_eth_dev *dev)
static int
tap_dev_configure(struct rte_eth_dev *dev)
{
+ uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa();
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+ if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+ rte_errno = ENOTSUP;
+ RTE_LOG(ERR, PMD,
+ "Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+ tx_offloads, supp_tx_offloads);
+ return -rte_errno;
+ }
if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
RTE_LOG(ERR, PMD,
"%s: number of rx queues %d exceeds max num of queues %d\n",
@@ -678,13 +734,12 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = 0;
dev_info->pci_dev = NULL;
dev_info->speed_capa = tap_dev_speed_capa();
- dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
- dev_info->tx_offload_capa =
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
+ dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
+ dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
+ dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
+ dev_info->tx_queue_offload_capa;
}
static int
@@ -1000,6 +1055,19 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
return -1;
}
+ /* Verify application offloads are valid for our port and queue. */
+ if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
+ rte_errno = ENOTSUP;
+ RTE_LOG(ERR, PMD,
+ "%p: Rx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64 "\n",
+ (void *)dev, rx_conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ (tap_rx_offload_get_port_capa() |
+ tap_rx_offload_get_queue_capa()));
+ return -rte_errno;
+ }
rxq->mp = mp;
rxq->trigger_seen = 1; /* force initial burst */
rxq->in_port = dev->data->port_id;
@@ -1058,21 +1126,46 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_queue_id,
uint16_t nb_tx_desc __rte_unused,
unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
+ const struct rte_eth_txconf *tx_conf)
{
struct pmd_internals *internals = dev->data->dev_private;
+ struct tx_queue *txq;
int ret;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -1;
-
dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
+ /*
+ * Don't verify port offloads for applications which
+ * use the old API.
+ */
+ if (tx_conf != NULL &&
+ !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
+ if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
+ txq->csum = !!(tx_conf->offloads &
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM));
+ } else {
+ rte_errno = ENOTSUP;
+ RTE_LOG(ERR, PMD,
+ "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ tap_tx_offload_get_port_capa());
+ return -rte_errno;
+ }
+ }
ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
if (ret == -1)
return -1;
-
- RTE_LOG(DEBUG, PMD, " TX TAP device name %s, qid %d on fd %d\n",
- internals->name, tx_queue_id, internals->txq[tx_queue_id].fd);
+ RTE_LOG(DEBUG, PMD,
+ " TX TAP device name %s, qid %d on fd %d csum %s\n",
+ internals->name, tx_queue_id, internals->txq[tx_queue_id].fd,
+ txq->csum ? "on" : "off");
return 0;
}
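
From the application side, opting into this per-queue offload path means setting ETH_TXQ_FLAGS_IGNORE in txq_flags and listing the wanted offloads explicitly. A hedged sketch against the ethdev API of this release (error handling elided; port and descriptor values are placeholders):

#include <rte_ethdev.h>

/* Request checksum offloads on Tx queue 0 of a port (sketch). */
static int setup_tx_queue_with_offloads(uint16_t port_id, uint16_t nb_desc,
					unsigned int socket_id)
{
	struct rte_eth_txconf txconf = {
		.txq_flags = ETH_TXQ_FLAGS_IGNORE, /* trust .offloads instead */
		.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			    DEV_TX_OFFLOAD_UDP_CKSUM |
			    DEV_TX_OFFLOAD_TCP_CKSUM,
	};

	return rte_eth_tx_queue_setup(port_id, 0, nb_desc, socket_id, &txconf);
}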
@@ -1123,35 +1216,49 @@ tap_dev_intr_handler(void *cb_arg)
struct rte_eth_dev *dev = cb_arg;
struct pmd_internals *pmd = dev->data->dev_private;
- nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
+ tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
}
static int
-tap_intr_handle_set(struct rte_eth_dev *dev, int set)
+tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
{
struct pmd_internals *pmd = dev->data->dev_private;
/* In any case, disable interrupt if the conf is no longer there. */
if (!dev->data->dev_conf.intr_conf.lsc) {
if (pmd->intr_handle.fd != -1) {
- nl_final(pmd->intr_handle.fd);
+ tap_nl_final(pmd->intr_handle.fd);
rte_intr_callback_unregister(&pmd->intr_handle,
tap_dev_intr_handler, dev);
}
return 0;
}
if (set) {
- pmd->intr_handle.fd = nl_init(RTMGRP_LINK);
+ pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK);
if (unlikely(pmd->intr_handle.fd == -1))
return -EBADF;
return rte_intr_callback_register(
&pmd->intr_handle, tap_dev_intr_handler, dev);
}
- nl_final(pmd->intr_handle.fd);
+ tap_nl_final(pmd->intr_handle.fd);
return rte_intr_callback_unregister(&pmd->intr_handle,
tap_dev_intr_handler, dev);
}
+static int
+tap_intr_handle_set(struct rte_eth_dev *dev, int set)
+{
+ int err;
+
+ err = tap_lsc_intr_handle_set(dev, set);
+ if (err)
+ return err;
+ err = tap_rx_intr_vec_set(dev, set);
+ if (err && set)
+ tap_lsc_intr_handle_set(dev, 0);
+ return err;
+}
+
static const uint32_t*
tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
@@ -1244,13 +1351,13 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node);
if (!data) {
RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n");
- goto error_exit;
+ goto error_exit_nodev;
}
dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
if (!dev) {
RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n");
- goto error_exit;
+ goto error_exit_nodev;
}
pmd = dev->data->dev_private;
@@ -1284,6 +1391,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
pmd->intr_handle.fd = -1;
+ dev->intr_handle = &pmd->intr_handle;
/* Presetup the fds to -1 as being not valid */
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
@@ -1328,7 +1436,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
* - rte_flow actual/implicit lists
* - implicit rules
*/
- pmd->nlsk_fd = nl_init(0);
+ pmd->nlsk_fd = tap_nl_init(0);
if (pmd->nlsk_fd == -1) {
RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n",
pmd->name);
@@ -1420,6 +1528,11 @@ error_remote:
tap_flow_implicit_flush(pmd, NULL);
error_exit:
+ if (pmd->ioctl_sock > 0)
+ close(pmd->ioctl_sock);
+ rte_eth_dev_release_port(dev);
+
+error_exit_nodev:
RTE_LOG(ERR, PMD, "TAP Unable to initialize %s\n",
rte_vdev_device_name(vdev));
@@ -1444,16 +1557,6 @@ set_interface_name(const char *key __rte_unused,
}
static int
-set_interface_speed(const char *key __rte_unused,
- const char *value,
- void *extra_args)
-{
- *(int *)extra_args = (value) ? atoi(value) : ETH_SPEED_NUM_10G;
-
- return 0;
-}
-
-static int
set_remote_iface(const char *key __rte_unused,
const char *value,
void *extra_args)
@@ -1503,15 +1606,6 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
kvlist = rte_kvargs_parse(params, valid_arguments);
if (kvlist) {
- if (rte_kvargs_count(kvlist, ETH_TAP_SPEED_ARG) == 1) {
- ret = rte_kvargs_process(kvlist,
- ETH_TAP_SPEED_ARG,
- &set_interface_speed,
- &speed);
- if (ret == -1)
- goto leave;
- }
-
if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
ret = rte_kvargs_process(kvlist,
ETH_TAP_IFACE_ARG,
@@ -1579,7 +1673,7 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
if (internals->nlsk_fd) {
tap_flow_flush(eth_dev, NULL);
tap_flow_implicit_flush(internals, NULL);
- nl_final(internals->nlsk_fd);
+ tap_nl_final(internals->nlsk_fd);
}
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
if (internals->rxq[i].fd != -1) {
@@ -1609,6 +1703,5 @@ RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
ETH_TAP_IFACE_ARG "=<string> "
- ETH_TAP_SPEED_ARG "=<int> "
ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_FIXED " "
ETH_TAP_REMOTE_ARG "=<string>");
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
index 829f32f3..53a506ad 100644
--- a/drivers/net/tap/rte_eth_tap.h
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#ifndef _RTE_ETH_TAP_H_
@@ -41,11 +13,11 @@
#include <linux/if_tun.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#ifdef IFF_MULTI_QUEUE
-#define RTE_PMD_TAP_MAX_QUEUES 16
+#define RTE_PMD_TAP_MAX_QUEUES TAP_MAX_QUEUES
#else
#define RTE_PMD_TAP_MAX_QUEUES 1
#endif
@@ -76,6 +48,7 @@ struct rx_queue {
struct tx_queue {
int fd;
uint16_t *mtu; /* Pointer to MTU from dev_data */
+ uint16_t csum:1; /* Enable checksum offloading */
struct pkt_stats stats; /* Stats for this TX queue */
};
@@ -90,6 +63,13 @@ struct pmd_internals {
int ioctl_sock; /* socket for ioctl calls */
int nlsk_fd; /* Netlink socket fd */
int flow_isolate; /* 1 if flow isolation is enabled */
+ int flower_support; /* 1 if kernel supports flower, else 0 */
+ int flower_vlan_support; /* 1 if kernel supports flower VLAN, else 0 */
+ int rss_enabled; /* 1 if RSS is enabled, else 0 */
+ /* implicit rules set when RSS is enabled */
+ int map_fd; /* BPF RSS map fd */
+ int bpf_fd[RTE_PMD_TAP_MAX_QUEUES];/* List of bpf fds per queue */
+ LIST_HEAD(tap_rss_flows, rte_flow) rss_flows;
LIST_HEAD(tap_flows, rte_flow) flows; /* rte_flow rules */
/* implicit rte_flow rules set when a remote device is active */
LIST_HEAD(tap_implicit_flows, rte_flow) implicit_flows;
@@ -98,4 +78,8 @@ struct pmd_internals {
struct rte_intr_handle intr_handle; /* LSC interrupt handle. */
};
+/* tap_intr.c */
+
+int tap_rx_intr_vec_set(struct rte_eth_dev *dev, int set);
+
#endif /* _RTE_ETH_TAP_H_ */
diff --git a/drivers/net/tap/tap_bpf.h b/drivers/net/tap/tap_bpf.h
new file mode 100644
index 00000000..1a70ffe2
--- /dev/null
+++ b/drivers/net/tap/tap_bpf.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+ * Copyright 2017 Mellanox Technologies, Ltd.
+ */
+
+#ifndef __TAP_BPF_H__
+#define __TAP_BPF_H__
+
+#include <tap_autoconf.h>
+
+/* Do not #include <linux/bpf.h> since eBPF must compile on different
+ * distros which may include partial definitions for eBPF (while the
+ * kernel itself may support eBPF). Instead define here all that is needed
+ */
+
+/* BPF_MAP_UPDATE_ELEM command flags */
+#define BPF_ANY 0 /* create a new element or update an existing one */
+
+/* BPF architecture instruction struct */
+struct bpf_insn {
+ __u8 code;
+ __u8 dst_reg:4;
+ __u8 src_reg:4;
+ __s16 off;
+ __s32 imm; /* immediate value */
+};
+
+/* BPF program types */
+enum bpf_prog_type {
+ BPF_PROG_TYPE_UNSPEC,
+ BPF_PROG_TYPE_SOCKET_FILTER,
+ BPF_PROG_TYPE_KPROBE,
+ BPF_PROG_TYPE_SCHED_CLS,
+ BPF_PROG_TYPE_SCHED_ACT,
+};
+
+/* BPF command types */
+enum bpf_cmd {
+ BPF_MAP_CREATE,
+ BPF_MAP_LOOKUP_ELEM,
+ BPF_MAP_UPDATE_ELEM,
+ BPF_MAP_DELETE_ELEM,
+ BPF_MAP_GET_NEXT_KEY,
+ BPF_PROG_LOAD,
+};
+
+/* BPF map types */
+enum bpf_map_type {
+ BPF_MAP_TYPE_UNSPEC,
+ BPF_MAP_TYPE_HASH,
+};
+
+/* union of anonymous structs used with TAP BPF commands */
+union bpf_attr {
+ /* BPF_MAP_CREATE command */
+ struct {
+ __u32 map_type;
+ __u32 key_size;
+ __u32 value_size;
+ __u32 max_entries;
+ __u32 map_flags;
+ __u32 inner_map_fd;
+ };
+
+ /* BPF_MAP_UPDATE_ELEM, BPF_MAP_DELETE_ELEM commands */
+ struct {
+ __u32 map_fd;
+ __aligned_u64 key;
+ union {
+ __aligned_u64 value;
+ __aligned_u64 next_key;
+ };
+ __u64 flags;
+ };
+
+ /* BPF_PROG_LOAD command */
+ struct {
+ __u32 prog_type;
+ __u32 insn_cnt;
+ __aligned_u64 insns;
+ __aligned_u64 license;
+ __u32 log_level;
+ __u32 log_size;
+ __aligned_u64 log_buf;
+ __u32 kern_version;
+ __u32 prog_flags;
+ };
+} __attribute__((aligned(8)));
+
+#ifndef __NR_bpf
+# if defined(__i386__)
+# define __NR_bpf 357
+# elif defined(__x86_64__)
+# define __NR_bpf 321
+# elif defined(__arm__)
+# define __NR_bpf 386
+# elif defined(__aarch64__)
+# define __NR_bpf 280
+# elif defined(__sparc__)
+# define __NR_bpf 349
+# elif defined(__s390__)
+# define __NR_bpf 351
+# elif defined(__powerpc__)
+# define __NR_bpf 361
+# else
+# error __NR_bpf not defined
+# endif
+#endif
+
+enum {
+ BPF_MAP_ID_KEY,
+ BPF_MAP_ID_SIMPLE,
+};
+
+static int bpf_load(enum bpf_prog_type type, const struct bpf_insn *insns,
+ size_t insns_cnt, const char *license);
+
+#endif /* __TAP_BPF_H__ */
diff --git a/drivers/net/tap/tap_bpf_api.c b/drivers/net/tap/tap_bpf_api.c
new file mode 100644
index 00000000..109a681e
--- /dev/null
+++ b/drivers/net/tap/tap_bpf_api.c
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Mellanox Technologies, Ltd.
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <rte_malloc.h>
+#include <rte_eth_tap.h>
+#include <tap_flow.h>
+#include <tap_autoconf.h>
+#include <tap_tcmsgs.h>
+#include <tap_bpf.h>
+#include <tap_bpf_insns.h>
+
+/**
+ * Load BPF program (section cls_q) into the kernel and return a bpf fd
+ *
+ * @param queue_idx
+ * Queue index to match in the cls_q classifier
+ *
+ * @return
+ * -1 if the BPF program couldn't be loaded. An fd (int) otherwise.
+ */
+int tap_flow_bpf_cls_q(__u32 queue_idx)
+{
+ cls_q_insns[1].imm = queue_idx;
+
+ return bpf_load(BPF_PROG_TYPE_SCHED_CLS,
+ (struct bpf_insn *)cls_q_insns,
+ RTE_DIM(cls_q_insns),
+ "Dual BSD/GPL");
+}
+
+/**
+ * Load BPF program (section l3_l4) into the kernel and return a bpf fd.
+ *
+ * @param[in] key_idx
+ * RSS MAP key index
+ *
+ * @param[in] map_fd
+ * BPF RSS map file descriptor
+ *
+ * @return
+ * -1 if the BPF program couldn't be loaded. An fd (int) otherwise.
+ */
+int tap_flow_bpf_calc_l3_l4_hash(__u32 key_idx, int map_fd)
+{
+ l3_l4_hash_insns[4].imm = key_idx;
+ l3_l4_hash_insns[9].imm = map_fd;
+
+ return bpf_load(BPF_PROG_TYPE_SCHED_ACT,
+ (struct bpf_insn *)l3_l4_hash_insns,
+ RTE_DIM(l3_l4_hash_insns),
+ "Dual BSD/GPL");
+}
+
+/**
+ * Helper function to convert a pointer to an unsigned 64-bit integer
+ *
+ * @param[in] ptr
+ * pointer to address
+ *
+ * @return
+ * The pointer address as an unsigned 64-bit integer
+ */
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64)(unsigned long)ptr;
+}
+
+/**
+ * Call BPF system call
+ *
+ * @param[in] cmd
+ * BPF command for program loading, map creation, map entry update, etc.
+ *
+ * @param[in] attr
+ * System call attributes relevant to system call command
+ *
+ * @param[in] size
+ * size of attr parameter
+ *
+ * @return
+ * -1 if BPF system call failed, 0 otherwise
+ */
+static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
+ unsigned int size)
+{
+ return syscall(__NR_bpf, cmd, attr, size);
+}
+
+/**
+ * Load BPF instructions to kernel
+ *
+ * @param[in] type
+ * BPF program type: classifier or action
+ *
+ * @param[in] insns
+ * Array of BPF instructions (the eBPF bytecode to load)
+ *
+ * @param[in] insns_cnt
+ * Number of BPF instructions (size of array)
+ *
+ * @param[in] license
+ * License string that must be acknowledged by the kernel
+ *
+ * @return
+ * -1 if the BPF program couldn't be loaded, fd (file descriptor) otherwise
+ */
+static int bpf_load(enum bpf_prog_type type,
+ const struct bpf_insn *insns,
+ size_t insns_cnt,
+ const char *license)
+{
+ union bpf_attr attr = {};
+
+ bzero(&attr, sizeof(attr));
+ attr.prog_type = type;
+ attr.insn_cnt = (__u32)insns_cnt;
+ attr.insns = ptr_to_u64(insns);
+ attr.license = ptr_to_u64(license);
+ attr.log_buf = ptr_to_u64(NULL);
+ attr.log_level = 0;
+ attr.kern_version = 0;
+
+ return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+}
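
For illustration, the smallest program this loader can push through is "mov r0, 0; exit"; the two raw instructions below follow the struct bpf_insn layout from tap_bpf.h, and the caller is hypothetical:

/* Hypothetical caller: load a two-instruction "return 0" classifier. */
static const struct bpf_insn trivial_prog[] = {
	{0xb7, 0, 0, 0, 0x00000000}, /* mov64 r0, 0 */
	{0x95, 0, 0, 0, 0x00000000}, /* exit */
};

static int load_trivial_prog(void)
{
	/* Returns -1 on failure, a program fd otherwise. */
	return bpf_load(BPF_PROG_TYPE_SCHED_CLS, trivial_prog,
			RTE_DIM(trivial_prog), "Dual BSD/GPL");
}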
+
+/**
+ * Create BPF map for RSS rules
+ *
+ * @param[in] key_size
+ * map RSS key size
+ *
+ * @param[in] value_size
+ * Map RSS value size
+ *
+ * @param[in] max_entries
+ * Map max number of RSS entries (limit on max RSS rules)
+ *
+ * @return
+ * -1 if BPF map couldn't be created, map fd otherwise
+ */
+int tap_flow_bpf_rss_map_create(unsigned int key_size,
+ unsigned int value_size,
+ unsigned int max_entries)
+{
+ union bpf_attr attr = {};
+
+ bzero(&attr, sizeof(attr));
+ attr.map_type = BPF_MAP_TYPE_HASH;
+ attr.key_size = key_size;
+ attr.value_size = value_size;
+ attr.max_entries = max_entries;
+
+ return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+}
+
+/**
+ * Update RSS entry in BPF map
+ *
+ * @param[in] fd
+ * RSS map fd
+ *
+ * @param[in] key
+ * Pointer to RSS key whose entry is updated
+ *
+ * @param[in] value
+ * Pointer to RSS new updated value
+ *
+ * @return
+ * -1 if RSS entry failed to be updated, 0 otherwise
+ */
+int tap_flow_bpf_update_rss_elem(int fd, void *key, void *value)
+{
+ union bpf_attr attr = {};
+
+ bzero(&attr, sizeof(attr));
+
+ attr.map_type = BPF_MAP_TYPE_HASH;
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+ attr.flags = BPF_ANY;
+
+ return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
+}
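+
+/*
+ * Usage sketch (hypothetical caller): publish an RSS key for the BPF
+ * program to look up; the '...' initializer is elided here.
+ *
+ *	__u32 idx = KEY_IDX;
+ *	struct rss_key rsskey = { ... };
+ *
+ *	if (tap_flow_bpf_update_rss_elem(map_fd, &idx, &rsskey) < 0)
+ *		return -1;
+ */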
diff --git a/drivers/net/tap/tap_bpf_insns.h b/drivers/net/tap/tap_bpf_insns.h
new file mode 100644
index 00000000..89873b6d
--- /dev/null
+++ b/drivers/net/tap/tap_bpf_insns.h
@@ -0,0 +1,1696 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Mellanox Technologies, Ltd.
+ */
+
+#include <tap_bpf.h>
+
+/* bpf_insn array matching cls_q section. See tap_bpf_program.c file */
+struct bpf_insn cls_q_insns[] = {
+ {0x61, 2, 1, 52, 0x00000000},
+ {0x18, 3, 0, 0, 0xdeadbeef},
+ {0x00, 0, 0, 0, 0x00000000},
+ {0x63, 10, 3, -4, 0x00000000},
+ {0xb7, 0, 0, 0, 0x00000000},
+ {0x61, 3, 10, -4, 0x00000000},
+ {0x07, 3, 0, 0, 0x7cafe800},
+ {0x67, 3, 0, 0, 0x00000020},
+ {0x77, 3, 0, 0, 0x00000020},
+ {0x5d, 2, 3, 4, 0x00000000},
+ {0xb7, 2, 0, 0, 0x00000000},
+ {0x63, 1, 2, 52, 0x00000000},
+ {0x18, 0, 0, 0, 0xffffffff},
+ {0x00, 0, 0, 0, 0x00000000},
+ {0x95, 0, 0, 0, 0x00000000},
+};
+
+/* bpf_insn array matching l3_l4 section. See tap_bpf_program.c file */
+struct bpf_insn l3_l4_hash_insns[] = {
+ {0xbf, 7, 1, 0, 0x00000000},
+ {0x61, 8, 7, 16, 0x00000000},
+ {0x61, 6, 7, 76, 0x00000000},
+ {0x61, 9, 7, 80, 0x00000000},
+ {0x18, 1, 0, 0, 0xdeadbeef},
+ {0x00, 0, 0, 0, 0x00000000},
+ {0x63, 10, 1, -4, 0x00000000},
+ {0xbf, 2, 10, 0, 0x00000000},
+ {0x07, 2, 0, 0, 0xfffffffc},
+ {0x18, 1, 1, 0, 0x0000cafe},
+ {0x00, 0, 0, 0, 0x00000000},
+ {0x85, 0, 0, 0, 0x00000001},
+ {0x55, 0, 0, 21, 0x00000000},
+ {0xb7, 1, 0, 0, 0x00000a64},
+ {0x6b, 10, 1, -16, 0x00000000},
+ {0x18, 1, 0, 0, 0x69666e6f},
+ {0x00, 0, 0, 0, 0x65727567},
+ {0x7b, 10, 1, -24, 0x00000000},
+ {0x18, 1, 0, 0, 0x6e207369},
+ {0x00, 0, 0, 0, 0x6320746f},
+ {0x7b, 10, 1, -32, 0x00000000},
+ {0x18, 1, 0, 0, 0x20737372},
+ {0x00, 0, 0, 0, 0x2079656b},
+ {0x7b, 10, 1, -40, 0x00000000},
+ {0x18, 1, 0, 0, 0x68736168},
+ {0x00, 0, 0, 0, 0x203a2928},
+ {0x7b, 10, 1, -48, 0x00000000},
+ {0xb7, 7, 0, 0, 0x00000000},
+ {0x73, 10, 7, -14, 0x00000000},
+ {0xbf, 1, 10, 0, 0x00000000},
+ {0x07, 1, 0, 0, 0xffffffd0},
+ {0xb7, 2, 0, 0, 0x00000023},
+ {0x85, 0, 0, 0, 0x00000006},
+ {0x05, 0, 0, 1632, 0x00000000},
+ {0xb7, 1, 0, 0, 0x0000000e},
+ {0x61, 2, 7, 20, 0x00000000},
+ {0x15, 2, 0, 10, 0x00000000},
+ {0x61, 2, 7, 28, 0x00000000},
+ {0x55, 2, 0, 8, 0x0000a888},
+ {0xbf, 2, 7, 0, 0x00000000},
+ {0xb7, 7, 0, 0, 0x00000000},
+ {0xbf, 1, 6, 0, 0x00000000},
+ {0x07, 1, 0, 0, 0x00000012},
+ {0x2d, 1, 9, 1622, 0x00000000},
+ {0xb7, 1, 0, 0, 0x00000012},
+ {0x69, 8, 6, 16, 0x00000000},
+ {0xbf, 7, 2, 0, 0x00000000},
+ {0x7b, 10, 7, -56, 0x00000000},
+ {0x57, 8, 0, 0, 0x0000ffff},
+ {0x15, 8, 0, 409, 0x0000dd86},
+ {0xb7, 7, 0, 0, 0x00000003},
+ {0x55, 8, 0, 1614, 0x00000008},
+ {0x0f, 6, 1, 0, 0x00000000},
+ {0xb7, 7, 0, 0, 0x00000000},
+ {0xbf, 1, 6, 0, 0x00000000},
+ {0x07, 1, 0, 0, 0x00000018},
+ {0x2d, 1, 9, 1609, 0x00000000},
+ {0x71, 3, 6, 12, 0x00000000},
+ {0xbf, 1, 3, 0, 0x00000000},
+ {0x67, 1, 0, 0, 0x00000038},
+ {0xc7, 1, 0, 0, 0x00000020},
+ {0x77, 1, 0, 0, 0x0000001f},
+ {0x57, 1, 0, 0, 0x2cc681d1},
+ {0x67, 3, 0, 0, 0x00000018},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x40000000},
+ {0xb7, 2, 0, 0, 0x00000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x598d03a2},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x20000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb31a0745},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x10000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x66340e8a},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x08000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcc681d15},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x04000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x98d03a2b},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x02000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x31a07456},
+ {0x57, 3, 0, 0, 0x01000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6340e8ad},
+ {0x71, 3, 6, 13, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000010},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00800000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc681d15b},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00400000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d03a2b7},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00200000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1a07456f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00100000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x340e8ade},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00080000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x681d15bd},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00040000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd03a2b7b},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00020000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa07456f6},
+ {0x57, 3, 0, 0, 0x00010000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x40e8aded},
+ {0x71, 3, 6, 14, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000008},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00008000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x81d15bdb},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00004000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x03a2b7b7},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00002000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x07456f6f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00001000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0e8adedf},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000800},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1d15bdbf},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000400},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3a2b7b7e},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000200},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7456f6fd},
+ {0x57, 3, 0, 0, 0x00000100},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe8adedfa},
+ {0x71, 3, 6, 15, 0x00000000},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000080},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd15bdbf4},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000040},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa2b7b7e9},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000020},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x456f6fd3},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000010},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8adedfa7},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000008},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x15bdbf4f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000004},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2b7b7e9e},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000002},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x56f6fd3d},
+ {0x57, 3, 0, 0, 0x00000001},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xadedfa7b},
+ {0x71, 4, 6, 16, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000038},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0xb7, 3, 0, 0, 0xffffffff},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5bdbf4f7},
+ {0x67, 4, 0, 0, 0x00000018},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb7b7e9ef},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6f6fd3df},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdedfa7bf},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbdbf4f7f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7b7e9eff},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf6fd3dff},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xedfa7bfe},
+ {0x71, 4, 6, 17, 0x00000000},
+ {0x67, 4, 0, 0, 0x00000010},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdbf4f7fc},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb7e9eff9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6fd3dff2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdfa7bfe5},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbf4f7fca},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7e9eff94},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfd3dff28},
+ {0x57, 4, 0, 0, 0x00010000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfa7bfe51},
+ {0x71, 4, 6, 18, 0x00000000},
+ {0x67, 4, 0, 0, 0x00000008},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf4f7fca2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe9eff945},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd3dff28a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa7bfe514},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4f7fca28},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9eff9450},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3dff28a0},
+ {0x57, 4, 0, 0, 0x00000100},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7bfe5141},
+ {0x71, 4, 6, 19, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf7fca283},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xeff94506},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdff28a0c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbfe51418},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7fca2831},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xff945063},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xff28a0c6},
+ {0x57, 4, 0, 0, 0x00000001},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfe51418c},
+ {0x71, 4, 6, 20, 0x00000000},
+ {0x67, 4, 0, 0, 0x00000008},
+ {0x71, 5, 6, 21, 0x00000000},
+ {0x4f, 4, 5, 0, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000030},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfca28319},
+ {0x67, 4, 0, 0, 0x00000010},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x40000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf9450633},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x20000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf28a0c67},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x10000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe51418ce},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x08000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xca28319d},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x04000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9450633b},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x02000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x28a0c676},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x01000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x51418ced},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00800000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa28319db},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00400000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x450633b6},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00200000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8a0c676c},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00100000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1418ced8},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00080000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x28319db1},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00040000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x50633b63},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00020000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa0c676c6},
+ {0x57, 4, 0, 0, 0x00010000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x418ced8d},
+ {0x71, 3, 6, 22, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000008},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00008000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8319db1a},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00004000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0633b634},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00002000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0c676c68},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00001000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x18ced8d1},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000800},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x319db1a3},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000400},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x633b6347},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000200},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc676c68f},
+ {0x57, 3, 0, 0, 0x00000100},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8ced8d1f},
+ {0x71, 3, 6, 23, 0x00000000},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000080},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x19db1a3e},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000040},
+ {0x79, 5, 10, -56, 0x00000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x33b6347d},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000020},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x676c68fa},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000010},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xced8d1f4},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000008},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9db1a3e9},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000004},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3b6347d2},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000002},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x76c68fa5},
+ {0x57, 3, 0, 0, 0x00000001},
+ {0x1d, 3, 2, 1177, 0x00000000},
+ {0xa7, 1, 0, 0, 0xed8d1f4a},
+ {0x05, 0, 0, 1175, 0x00000000},
+ {0x0f, 6, 1, 0, 0x00000000},
+ {0xb7, 7, 0, 0, 0x00000000},
+ {0xbf, 1, 6, 0, 0x00000000},
+ {0x07, 1, 0, 0, 0x0000002c},
+ {0x2d, 1, 9, 1202, 0x00000000},
+ {0x61, 4, 6, 8, 0x00000000},
+ {0xbf, 1, 4, 0, 0x00000000},
+ {0x67, 1, 0, 0, 0x00000038},
+ {0xc7, 1, 0, 0, 0x00000020},
+ {0x77, 1, 0, 0, 0x0000001f},
+ {0x57, 1, 0, 0, 0x2cc681d1},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000040},
+ {0xb7, 2, 0, 0, 0x00000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x598d03a2},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000020},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb31a0745},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000010},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x66340e8a},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000008},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcc681d15},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000004},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x98d03a2b},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000002},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x31a07456},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000001},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6340e8ad},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00008000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc681d15b},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00004000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d03a2b7},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00002000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1a07456f},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00001000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x340e8ade},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000800},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x681d15bd},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000400},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd03a2b7b},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000200},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa07456f6},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00000100},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x40e8aded},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00800000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x81d15bdb},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00400000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x03a2b7b7},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00200000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x07456f6f},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00100000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0e8adedf},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00080000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1d15bdbf},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00040000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3a2b7b7e},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00020000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7456f6fd},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00010000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe8adedfa},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0xb7, 3, 0, 0, 0xffffffff},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd15bdbf4},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa2b7b7e9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x456f6fd3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8adedfa7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x15bdbf4f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2b7b7e9e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x56f6fd3d},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xadedfa7b},
+ {0x61, 4, 6, 12, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5bdbf4f7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb7b7e9ef},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6f6fd3df},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdedfa7bf},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbdbf4f7f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7b7e9eff},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf6fd3dff},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xedfa7bfe},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdbf4f7fc},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb7e9eff9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6fd3dff2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdfa7bfe5},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbf4f7fca},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7e9eff94},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfd3dff28},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfa7bfe51},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf4f7fca2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe9eff945},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd3dff28a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa7bfe514},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4f7fca28},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9eff9450},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3dff28a0},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7bfe5141},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf7fca283},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xeff94506},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdff28a0c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbfe51418},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7fca2831},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xff945063},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xff28a0c6},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfe51418c},
+ {0x61, 4, 6, 16, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfca28319},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf9450633},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf28a0c67},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe51418ce},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xca28319d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9450633b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x28a0c676},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x51418ced},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa28319db},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x450633b6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8a0c676c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1418ced8},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x28319db1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x50633b63},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa0c676c6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x418ced8d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8319db1a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0633b634},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0c676c68},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x18ced8d1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x319db1a3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x633b6347},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc676c68f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8ced8d1f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x19db1a3e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x33b6347d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x676c68fa},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xced8d1f4},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9db1a3e9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3b6347d2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x76c68fa5},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xed8d1f4a},
+ {0x61, 4, 6, 20, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xdb1a3e94},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb6347d28},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6c68fa51},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd8d1f4a3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb1a3e946},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6347d28d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc68fa51a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d1f4a35},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1a3e946b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x347d28d7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x68fa51ae},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd1f4a35c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa3e946b9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x47d28d73},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8fa51ae7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1f4a35cf},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3e946b9e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7d28d73c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfa51ae78},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf4a35cf1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe946b9e3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd28d73c7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa51ae78e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4a35cf1c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x946b9e38},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x28d73c71},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x51ae78e3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa35cf1c6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x46b9e38d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d73c71b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1ae78e36},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x35cf1c6c},
+ {0x61, 4, 6, 24, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6b9e38d9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd73c71b2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xae78e364},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5cf1c6c9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb9e38d92},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x73c71b25},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe78e364b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcf1c6c96},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9e38d92c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3c71b259},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x78e364b2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf1c6c964},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe38d92c9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc71b2593},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8e364b27},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1c6c964e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x38d92c9c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x71b25938},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe364b270},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc6c964e0},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d92c9c0},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1b259380},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x364b2700},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6c964e01},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd92c9c03},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb2593807},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x64b2700f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc964e01e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x92c9c03d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2593807a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4b2700f4},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x964e01e8},
+ {0x61, 4, 6, 28, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2c9c03d1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x593807a3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb2700f46},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x64e01e8d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc9c03d1a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x93807a35},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2700f46b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4e01e8d6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9c03d1ad},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3807a35b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x700f46b6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe01e8d6c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc03d1ad9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x807a35b3},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x00f46b66},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x01e8d6cc},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x03d1ad99},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x07a35b32},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0f46b665},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1e8d6cca},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3d1ad994},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7a35b328},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf46b6651},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe8d6cca2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd1ad9944},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa35b3289},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x46b66512},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8d6cca25},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1ad9944a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x35b32894},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6b665129},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd6cca253},
+ {0x61, 4, 6, 32, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xad9944a7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5b32894f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb665129f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6cca253e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd9944a7d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb32894fb},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x665129f6},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcca253ec},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9944a7d9},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x32894fb2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x65129f65},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xca253eca},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x944a7d95},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2894fb2a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5129f655},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa253ecab},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x44a7d956},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x894fb2ac},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x129f6558},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x253ecab1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4a7d9563},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x94fb2ac7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x29f6558f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x53ecab1e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xa7d9563d},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4fb2ac7a},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9f6558f5},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3ecab1ea},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7d9563d5},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfb2ac7ab},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf6558f56},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xecab1eac},
+ {0x61, 4, 6, 36, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000080},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd9563d59},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000040},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb2ac7ab2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000020},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6558f564},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000010},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcab1eac8},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000008},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9563d590},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000004},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2ac7ab20},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000002},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x558f5641},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000001},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xab1eac83},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00008000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x563d5906},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00004000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xac7ab20c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00002000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x58f56418},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00001000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb1eac831},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000800},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x63d59063},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000400},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc7ab20c7},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000200},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8f56418f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00000100},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1eac831e},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00800000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3d59063c},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00400000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7ab20c78},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00200000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf56418f0},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00100000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xeac831e1},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00080000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xd59063c2},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00040000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xab20c784},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00020000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x56418f09},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x00010000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xac831e12},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000020},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x59063c25},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x40000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xb20c784b},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x20000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x6418f097},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x10000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc831e12f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x08000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9063c25f},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x04000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x20c784be},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x57, 5, 0, 0, 0x02000000},
+ {0x1d, 5, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x418f097c},
+ {0x57, 4, 0, 0, 0x01000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x831e12f9},
+ {0x71, 4, 6, 40, 0x00000000},
+ {0x67, 4, 0, 0, 0x00000008},
+ {0x71, 5, 6, 41, 0x00000000},
+ {0x4f, 4, 5, 0, 0x00000000},
+ {0xbf, 5, 4, 0, 0x00000000},
+ {0x67, 5, 0, 0, 0x00000030},
+ {0xc7, 5, 0, 0, 0x00000020},
+ {0x6d, 5, 3, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x063c25f3},
+ {0x67, 4, 0, 0, 0x00000010},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x40000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x0c784be7},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x20000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x18f097cf},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x10000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x31e12f9f},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x08000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x63c25f3f},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x04000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc784be7f},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x02000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x8f097cff},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x01000000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x1e12f9fe},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00800000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3c25f3fc},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00400000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x784be7f8},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00200000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf097cff0},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00100000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe12f9fe0},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00080000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xc25f3fc1},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00040000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x84be7f83},
+ {0xbf, 3, 4, 0, 0x00000000},
+ {0x57, 3, 0, 0, 0x00020000},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x097cff07},
+ {0x57, 4, 0, 0, 0x00010000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x12f9fe0f},
+ {0x71, 3, 6, 42, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000008},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00008000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x25f3fc1f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00004000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x4be7f83f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00002000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x97cff07f},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00001000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x2f9fe0fe},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000800},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x5f3fc1fd},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000400},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xbe7f83fb},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000200},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7cff07f7},
+ {0x57, 3, 0, 0, 0x00000100},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf9fe0fee},
+ {0x71, 3, 6, 43, 0x00000000},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000080},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xf3fc1fdc},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000040},
+ {0x79, 5, 10, -56, 0x00000000},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xe7f83fb8},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000020},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xcff07f70},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000010},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x9fe0fee1},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000008},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x3fc1fdc2},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000004},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0x7f83fb85},
+ {0xbf, 4, 3, 0, 0x00000000},
+ {0x57, 4, 0, 0, 0x00000002},
+ {0x1d, 4, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xff07f70a},
+ {0x57, 3, 0, 0, 0x00000001},
+ {0x1d, 3, 2, 1, 0x00000000},
+ {0xa7, 1, 0, 0, 0xfe0fee15},
+ {0x71, 2, 0, 201, 0x00000000},
+ {0x67, 2, 0, 0, 0x00000008},
+ {0x71, 3, 0, 200, 0x00000000},
+ {0x4f, 2, 3, 0, 0x00000000},
+ {0x71, 3, 0, 203, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000008},
+ {0x71, 4, 0, 202, 0x00000000},
+ {0x4f, 3, 4, 0, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000010},
+ {0x4f, 3, 2, 0, 0x00000000},
+ {0x67, 1, 0, 0, 0x00000020},
+ {0x77, 1, 0, 0, 0x00000020},
+ {0xbf, 2, 1, 0, 0x00000000},
+ {0x3f, 2, 3, 0, 0x00000000},
+ {0x2f, 2, 3, 0, 0x00000000},
+ {0x1f, 1, 2, 0, 0x00000000},
+ {0x57, 1, 0, 0, 0x0000000f},
+ {0x67, 1, 0, 0, 0x00000002},
+ {0x0f, 0, 1, 0, 0x00000000},
+ {0x71, 1, 0, 137, 0x00000000},
+ {0x67, 1, 0, 0, 0x00000008},
+ {0x71, 2, 0, 136, 0x00000000},
+ {0x4f, 1, 2, 0, 0x00000000},
+ {0x71, 2, 0, 138, 0x00000000},
+ {0x71, 3, 0, 139, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000008},
+ {0x4f, 3, 2, 0, 0x00000000},
+ {0x67, 3, 0, 0, 0x00000010},
+ {0x4f, 3, 1, 0, 0x00000000},
+ {0x07, 3, 0, 0, 0x7cafe800},
+ {0x63, 5, 3, 52, 0x00000000},
+ {0xb7, 7, 0, 0, 0x00000001},
+ {0xbf, 0, 7, 0, 0x00000000},
+ {0x95, 0, 0, 0, 0x00000000},
+};
diff --git a/drivers/net/tap/tap_bpf_program.c b/drivers/net/tap/tap_bpf_program.c
new file mode 100644
index 00000000..8abb3b76
--- /dev/null
+++ b/drivers/net/tap/tap_bpf_program.c
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+ * Copyright 2017 Mellanox Technologies, Ltd.
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <asm/types.h>
+#include <linux/in.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_tunnel.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+
+#include "tap_rss.h"
+
+/** Create IPv4 address */
+#define IPv4(a, b, c, d) ((__u32)(((a) & 0xff) << 24) | \
+ (((b) & 0xff) << 16) | \
+ (((c) & 0xff) << 8) | \
+ ((d) & 0xff))
+
+#define PORT(a, b) ((__u16)(((a) & 0xff) << 8) | \
+ ((b) & 0xff))
+
+/*
+ * The queue number is offset by a unique QUEUE_OFFSET, to distinguish
+ * packets that have gone through this rule (skb->cb[1] != 0) from others.
+ */
+#define QUEUE_OFFSET 0x7cafe800
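+/* e.g. a packet steered to queue 3 carries 0x7cafe800 + 3 = 0x7cafe803
+ * in skb->cb[1]
+ */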
+#define PIN_GLOBAL_NS 2
+
+#define KEY_IDX 0
+#define BPF_MAP_ID_KEY 1
+
+struct vlan_hdr {
+ __be16 proto;
+ __be16 tci;
+};
+
+struct bpf_elf_map __attribute__((section("maps"), used))
+map_keys = {
+ .type = BPF_MAP_TYPE_HASH,
+ .id = BPF_MAP_ID_KEY,
+ .size_key = sizeof(__u32),
+ .size_value = sizeof(struct rss_key),
+ .max_elem = 256,
+ .pinning = PIN_GLOBAL_NS,
+};
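+
+/*
+ * Note: struct bpf_elf_map mirrors the map description understood by the
+ * iproute2/tc ELF loader, and PIN_GLOBAL_NS asks that loader to pin the
+ * map in its global namespace (typically /sys/fs/bpf/tc/globals), so the
+ * map can be shared with programs loaded separately.
+ */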
+
+__section("cls_q") int
+match_q(struct __sk_buff *skb)
+{
+ __u32 queue = skb->cb[1];
+ volatile __u32 q = 0xdeadbeef;
+ __u32 match_queue = QUEUE_OFFSET + q;
+
+ /* printt("match_q$i() queue = %d\n", queue); */
+
+ if (queue != match_queue)
+ return TC_ACT_OK;
+
+ /* queue match */
+ skb->cb[1] = 0;
+ return TC_ACT_UNSPEC;
+}
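+
+/*
+ * Note: the 0xdeadbeef placeholder above is never used at run time; it is
+ * the immediate that tap_flow_bpf_cls_q() overwrites with the real queue
+ * index (cls_q_insns[1].imm) before loading the compiled program.
+ */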
+
+
+struct ipv4_l3_l4_tuple {
+ __u32 src_addr;
+ __u32 dst_addr;
+ __u16 dport;
+ __u16 sport;
+} __attribute__((packed));
+
+struct ipv6_l3_l4_tuple {
+ __u8 src_addr[16];
+ __u8 dst_addr[16];
+ __u16 dport;
+ __u16 sport;
+} __attribute__((packed));
+
+static const __u8 def_rss_key[] = {
+ 0xd1, 0x81, 0xc6, 0x2c,
+ 0xf7, 0xf4, 0xdb, 0x5b,
+ 0x19, 0x83, 0xa2, 0xfc,
+ 0x94, 0x3e, 0x1a, 0xdb,
+ 0xd9, 0x38, 0x9e, 0x6b,
+ 0xd1, 0x03, 0x9c, 0x2c,
+ 0xa7, 0x44, 0x99, 0xad,
+ 0x59, 0x3d, 0x56, 0xd9,
+ 0xf3, 0x25, 0x3c, 0x06,
+ 0x2a, 0xdc, 0x1f, 0xfc,
+};
+
+static __u32 __attribute__((always_inline))
+rte_softrss_be(const __u32 *input_tuple, const uint8_t *rss_key,
+ __u8 input_len)
+{
+ __u32 i, j, hash = 0;
+#pragma unroll
+ for (j = 0; j < input_len; j++) {
+#pragma unroll
+ for (i = 0; i < 32; i++) {
+ if (input_tuple[j] & (1 << (31 - i))) {
+ hash ^= ((const __u32 *)def_rss_key)[j] << i |
+ (__u32)((uint64_t)
+ (((const __u32 *)def_rss_key)[j + 1])
+ >> (32 - i));
+ }
+ }
+ }
+ return hash;
+}
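+
+/*
+ * Toeplitz-style software hash: for each set bit i (MSB first) of input
+ * word j, the 32-bit key window starting i bits into key word j is XORed
+ * into the hash. Degenerate example: an input whose only set bit is the
+ * MSB of its first word hashes to the first 32-bit word of the key. Note
+ * that the key is read from def_rss_key directly; the rss_key argument is
+ * unused.
+ */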
+
+static int __attribute__((always_inline))
+rss_l3_l4(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ __u16 proto = (__u16)skb->protocol;
+ __u32 key_idx = 0xdeadbeef;
+ __u32 hash;
+ struct rss_key *rsskey;
+ __u64 off = ETH_HLEN;
+ int j;
+ __u8 *key = 0;
+ __u32 len;
+ __u32 queue = 0;
+
+ rsskey = map_lookup_elem(&map_keys, &key_idx);
+ if (!rsskey) {
+ printt("hash(): rss key is not configured\n");
+ return TC_ACT_OK;
+ }
+ key = (__u8 *)rsskey->key;
+
+ /* Get correct proto for 802.1ad */
+ if (skb->vlan_present && skb->vlan_proto == htons(ETH_P_8021AD)) {
+ if (data + ETH_ALEN * 2 + sizeof(struct vlan_hdr) +
+ sizeof(proto) > data_end)
+ return TC_ACT_OK;
+ proto = *(__u16 *)(data + ETH_ALEN * 2 +
+ sizeof(struct vlan_hdr));
+ off += sizeof(struct vlan_hdr);
+ }
+
+ if (proto == htons(ETH_P_IP)) {
+ if (data + off + sizeof(struct iphdr) + sizeof(__u32)
+ > data_end)
+ return TC_ACT_OK;
+
+ __u8 *src_dst_addr = data + off + offsetof(struct iphdr, saddr);
+ __u8 *src_dst_port = data + off + sizeof(struct iphdr);
+ struct ipv4_l3_l4_tuple v4_tuple = {
+ .src_addr = IPv4(*(src_dst_addr + 0),
+ *(src_dst_addr + 1),
+ *(src_dst_addr + 2),
+ *(src_dst_addr + 3)),
+ .dst_addr = IPv4(*(src_dst_addr + 4),
+ *(src_dst_addr + 5),
+ *(src_dst_addr + 6),
+ *(src_dst_addr + 7)),
+ .sport = PORT(*(src_dst_port + 0),
+ *(src_dst_port + 1)),
+ .dport = PORT(*(src_dst_port + 2),
+ *(src_dst_port + 3)),
+ };
+ __u8 input_len = sizeof(v4_tuple) / sizeof(__u32);
+ if (rsskey->hash_fields & (1 << HASH_FIELD_IPV4_L3))
+ input_len--;
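+		/* Pass a literal length so the hash loops stay fully
+		 * unrollable for the BPF verifier; the computed input_len
+		 * is currently unused. */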
+ hash = rte_softrss_be((__u32 *)&v4_tuple, key, 3);
+ } else if (proto == htons(ETH_P_IPV6)) {
+ if (data + off + sizeof(struct ipv6hdr) +
+ sizeof(__u32) > data_end)
+ return TC_ACT_OK;
+ __u8 *src_dst_addr = data + off +
+ offsetof(struct ipv6hdr, saddr);
+ __u8 *src_dst_port = data + off +
+ sizeof(struct ipv6hdr);
+ struct ipv6_l3_l4_tuple v6_tuple;
+ for (j = 0; j < 4; j++)
+ *((uint32_t *)&v6_tuple.src_addr + j) =
+ __builtin_bswap32(*((uint32_t *)
+ src_dst_addr + j));
+ for (j = 0; j < 4; j++)
+ *((uint32_t *)&v6_tuple.dst_addr + j) =
+ __builtin_bswap32(*((uint32_t *)
+ src_dst_addr + 4 + j));
+ v6_tuple.sport = PORT(*(src_dst_port + 0),
+ *(src_dst_port + 1));
+ v6_tuple.dport = PORT(*(src_dst_port + 2),
+ *(src_dst_port + 3));
+
+ __u8 input_len = sizeof(v6_tuple) / sizeof(__u32);
+ if (rsskey->hash_fields & (1 << HASH_FIELD_IPV6_L3))
+ input_len--;
+ hash = rte_softrss_be((__u32 *)&v6_tuple, key, 9);
+ } else {
+ return TC_ACT_PIPE;
+ }
+
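+	/* The modulo picks the queue; the extra mask keeps the array index
+	 * provably within queues[TAP_MAX_QUEUES] for the BPF verifier. */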
+ queue = rsskey->queues[(hash % rsskey->nb_queues) &
+ (TAP_MAX_QUEUES - 1)];
+ skb->cb[1] = QUEUE_OFFSET + queue;
+ /* printt(">>>>> rss_l3_l4 hash=0x%x queue=%u\n", hash, queue); */
+
+ return TC_ACT_RECLASSIFY;
+}
+
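+/*
+ * Emit one ELF section per hash flavour: RSS(l3_l4) expands to section
+ * "l3_l4" containing l3_l4_hash(), a thin wrapper around rss_l3_l4().
+ */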
+#define RSS(L) \
+ __section(#L) int \
+ L ## _hash(struct __sk_buff *skb) \
+ { \
+ return rss_ ## L (skb); \
+ }
+
+RSS(l3_l4)
+
+BPF_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index ffc0b85b..551b2d83 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -1,39 +1,13 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#include <errno.h>
#include <string.h>
+#include <unistd.h>
#include <sys/queue.h>
+#include <sys/resource.h>
#include <rte_byteorder.h>
#include <rte_jhash.h>
@@ -42,6 +16,7 @@
#include <tap_flow.h>
#include <tap_autoconf.h>
#include <tap_tcmsgs.h>
+#include <tap_rss.h>
#ifndef HAVE_TC_FLOWER
/*
@@ -81,12 +56,80 @@ enum {
TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
};
#endif
+/*
+ * For kernels < 4.2, BPF-related enums may not be defined.
+ * Runtime checks are carried out to gracefully report TC messages that
+ * are rejected by the kernel. A rejection may occur because:
+ * 1. the enum is not defined, or
+ * 2. the enum is defined but the kernel is not configured to support BPF
+ *    system calls, BPF classifiers or BPF actions.
+ */
+#ifndef HAVE_TC_BPF
+enum {
+ TCA_BPF_UNSPEC,
+ TCA_BPF_ACT,
+ TCA_BPF_POLICE,
+ TCA_BPF_CLASSID,
+ TCA_BPF_OPS_LEN,
+ TCA_BPF_OPS,
+};
+#endif
+#ifndef HAVE_TC_BPF_FD
+enum {
+ TCA_BPF_FD = TCA_BPF_OPS + 1,
+ TCA_BPF_NAME,
+};
+#endif
+#ifndef HAVE_TC_ACT_BPF
+#define tc_gen \
+ __u32 index; \
+ __u32 capab; \
+ int action; \
+ int refcnt; \
+ int bindcnt
+
+struct tc_act_bpf {
+ tc_gen;
+};
+
+enum {
+ TCA_ACT_BPF_UNSPEC,
+ TCA_ACT_BPF_TM,
+ TCA_ACT_BPF_PARMS,
+ TCA_ACT_BPF_OPS_LEN,
+ TCA_ACT_BPF_OPS,
+};
+
+#endif
+#ifndef HAVE_TC_ACT_BPF_FD
+enum {
+ TCA_ACT_BPF_FD = TCA_ACT_BPF_OPS + 1,
+ TCA_ACT_BPF_NAME,
+};
+#endif
+
+/* RSS key management */
+enum bpf_rss_key_e {
+ KEY_CMD_GET = 1,
+ KEY_CMD_RELEASE,
+ KEY_CMD_INIT,
+ KEY_CMD_DEINIT,
+};
+
+enum key_status_e {
+ KEY_STAT_UNSPEC,
+ KEY_STAT_USED,
+ KEY_STAT_AVAILABLE,
+};
#define ISOLATE_HANDLE 1
+#define REMOTE_PROMISCUOUS_HANDLE 2
struct rte_flow {
LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
struct rte_flow *remote_flow; /* associated remote flow */
+	int bpf_fd[SEC_MAX]; /* list of BPF fds per ELF section */
+ uint32_t key_idx; /* RSS rule key index into BPF map */
struct nlmsg msg;
};
@@ -104,6 +147,24 @@ struct remote_rule {
int mirred;
};
+struct action_data {
+ char id[16];
+
+ union {
+ struct tc_gact gact;
+ struct tc_mirred mirred;
+ struct skbedit {
+ struct tc_skbedit skbedit;
+ uint16_t queue;
+ } skbedit;
+ struct bpf {
+ struct tc_act_bpf bpf;
+ int bpf_fd;
+ const char *annotation;
+ } bpf;
+ };
+};
+
static int tap_flow_create_eth(const struct rte_flow_item *item, void *data);
static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data);
static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data);
@@ -124,6 +185,10 @@ tap_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static void
+tap_flow_free(struct pmd_internals *pmd,
+ struct rte_flow *flow);
+
static int
tap_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
@@ -134,6 +199,14 @@ tap_flow_isolate(struct rte_eth_dev *dev,
int set,
struct rte_flow_error *error);
+static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx);
+static int rss_enable(struct pmd_internals *pmd,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
+ const struct rte_flow_action_rss *rss,
+ struct rte_flow_error *error);
+
static const struct rte_flow_ops tap_flow_ops = {
.validate = tap_flow_validate,
.create = tap_flow_create,
@@ -465,16 +538,16 @@ tap_flow_create_eth(const struct rte_flow_item *item, void *data)
return 0;
msg = &flow->msg;
if (!is_zero_ether_addr(&spec->dst)) {
- nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
&spec->dst.addr_bytes);
- nlattr_add(&msg->nh,
+ tap_nlattr_add(&msg->nh,
TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
&mask->dst.addr_bytes);
}
if (!is_zero_ether_addr(&mask->src)) {
- nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
&spec->src.addr_bytes);
- nlattr_add(&msg->nh,
+ tap_nlattr_add(&msg->nh,
TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
&mask->src.addr_bytes);
}
@@ -526,9 +599,11 @@ tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
uint8_t vid = VLAN_ID(tci);
if (prio)
- nlattr_add8(&msg->nh, TCA_FLOWER_KEY_VLAN_PRIO, prio);
+ tap_nlattr_add8(&msg->nh,
+ TCA_FLOWER_KEY_VLAN_PRIO, prio);
if (vid)
- nlattr_add16(&msg->nh, TCA_FLOWER_KEY_VLAN_ID, vid);
+ tap_nlattr_add16(&msg->nh,
+ TCA_FLOWER_KEY_VLAN_ID, vid);
}
return 0;
}
@@ -571,19 +646,19 @@ tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
if (!spec)
return 0;
if (spec->hdr.dst_addr) {
- nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
+ tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
spec->hdr.dst_addr);
- nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
+ tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
mask->hdr.dst_addr);
}
if (spec->hdr.src_addr) {
- nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
+ tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
spec->hdr.src_addr);
- nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
+ tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
mask->hdr.src_addr);
}
if (spec->hdr.next_proto_id)
- nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
+ tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
spec->hdr.next_proto_id);
return 0;
}
@@ -627,19 +702,20 @@ tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
if (!spec)
return 0;
if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
- nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
- nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
}
if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
- nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
- nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
}
if (spec->hdr.proto)
- nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
+ tap_nlattr_add8(&msg->nh,
+ TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
return 0;
}
@@ -677,14 +753,14 @@ tap_flow_create_udp(const struct rte_flow_item *item, void *data)
if (!flow)
return 0;
msg = &flow->msg;
- nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
+ tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
if (!spec)
return 0;
if (spec->hdr.dst_port & mask->hdr.dst_port)
- nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
+ tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
spec->hdr.dst_port);
if (spec->hdr.src_port & mask->hdr.src_port)
- nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
+ tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
spec->hdr.src_port);
return 0;
}
@@ -723,14 +799,14 @@ tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
if (!flow)
return 0;
msg = &flow->msg;
- nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
+ tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
if (!spec)
return 0;
if (spec->hdr.dst_port & mask->hdr.dst_port)
- nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
+ tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
spec->hdr.dst_port);
if (spec->hdr.src_port & mask->hdr.src_port)
- nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
+ tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
spec->hdr.src_port);
return 0;
}
@@ -816,112 +892,98 @@ tap_flow_item_validate(const struct rte_flow_item *item,
}
/**
- * Transform a DROP/PASSTHRU action item in the provided flow for TC.
+ * Configure the kernel with a TC action and its parameters.
+ * Handled actions: "gact", "mirred", "skbedit", "bpf".
*
- * @param[in, out] flow
- * Flow to be filled.
- * @param[in] action
- * Appropriate action to be set in the TCA_GACT_PARMS structure.
+ * @param[in] flow
+ * Pointer to the rte_flow containing the netlink message
*
- * @return
- * 0 if checks are alright, -1 otherwise.
- */
-static int
-add_action_gact(struct rte_flow *flow, int action)
-{
- struct nlmsg *msg = &flow->msg;
- size_t act_index = 1;
- struct tc_gact p = {
- .action = action
- };
-
- if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
- return -1;
- if (nlattr_nested_start(msg, act_index++) < 0)
- return -1;
- nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("gact"), "gact");
- if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
- return -1;
- nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(p), &p);
- nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
- nlattr_nested_finish(msg); /* nested act_index */
- nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
- return 0;
-}
-
-/**
- * Transform a MIRRED action item in the provided flow for TC.
+ * @param[in, out] act_index
+ * Pointer to action sequence number in the TC command
*
- * @param[in, out] flow
- * Flow to be filled.
- * @param[in] ifindex
- * Netdevice ifindex, where to mirror/redirect packet to.
- * @param[in] action_type
- * Either TCA_EGRESS_REDIR for redirection or TCA_EGRESS_MIRROR for mirroring.
+ * @param[in] adata
+ * Pointer to struct holding the action parameters
*
* @return
- * 0 if checks are alright, -1 otherwise.
+ * -1 on failure, 0 on success
*/
static int
-add_action_mirred(struct rte_flow *flow, uint16_t ifindex, uint16_t action_type)
+add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)
{
struct nlmsg *msg = &flow->msg;
- size_t act_index = 1;
- struct tc_mirred p = {
- .eaction = action_type,
- .ifindex = ifindex,
- };
- if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
+ if (tap_nlattr_nested_start(msg, (*act_index)++) < 0)
return -1;
- if (nlattr_nested_start(msg, act_index++) < 0)
+
+ tap_nlattr_add(&msg->nh, TCA_ACT_KIND,
+ strlen(adata->id) + 1, adata->id);
+ if (tap_nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
return -1;
- nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("mirred"), "mirred");
- if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
+ if (strcmp("gact", adata->id) == 0) {
+ tap_nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(adata->gact),
+ &adata->gact);
+ } else if (strcmp("mirred", adata->id) == 0) {
+ if (adata->mirred.eaction == TCA_EGRESS_MIRROR)
+ adata->mirred.action = TC_ACT_PIPE;
+ else /* REDIRECT */
+ adata->mirred.action = TC_ACT_STOLEN;
+ tap_nlattr_add(&msg->nh, TCA_MIRRED_PARMS,
+ sizeof(adata->mirred),
+ &adata->mirred);
+ } else if (strcmp("skbedit", adata->id) == 0) {
+ tap_nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS,
+ sizeof(adata->skbedit.skbedit),
+ &adata->skbedit.skbedit);
+ tap_nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING,
+ adata->skbedit.queue);
+ } else if (strcmp("bpf", adata->id) == 0) {
+ tap_nlattr_add32(&msg->nh, TCA_ACT_BPF_FD, adata->bpf.bpf_fd);
+ tap_nlattr_add(&msg->nh, TCA_ACT_BPF_NAME,
+ strlen(adata->bpf.annotation) + 1,
+ adata->bpf.annotation);
+ tap_nlattr_add(&msg->nh, TCA_ACT_BPF_PARMS,
+ sizeof(adata->bpf.bpf),
+ &adata->bpf.bpf);
+ } else {
return -1;
- if (action_type == TCA_EGRESS_MIRROR)
- p.action = TC_ACT_PIPE;
- else /* REDIRECT */
- p.action = TC_ACT_STOLEN;
- nlattr_add(&msg->nh, TCA_MIRRED_PARMS, sizeof(p), &p);
- nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
- nlattr_nested_finish(msg); /* nested act_index */
- nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
+ }
+ tap_nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
+ tap_nlattr_nested_finish(msg); /* nested act_index */
return 0;
}
/**
- * Transform a QUEUE action item in the provided flow for TC.
+ * Helper function to send a series of TC actions to the kernel.
*
- * @param[in, out] flow
- * Flow to be filled.
- * @param[in] queue
- * Queue id to use.
+ * @param[in] flow
+ * Pointer to the rte_flow containing the netlink message
+ *
+ * @param[in] nb_actions
+ * Number of actions in an array of action structs
+ *
+ * @param[in] data
+ * Pointer to an array of action structs
+ *
+ * @param[in] classifier_action
+ * The classifier on behalf of which the actions are configured
*
* @return
- * 0 if checks are alright, -1 otherwise.
+ * -1 on failure, 0 on success
*/
static int
-add_action_skbedit(struct rte_flow *flow, uint16_t queue)
+add_actions(struct rte_flow *flow, int nb_actions, struct action_data *data,
+ int classifier_action)
{
struct nlmsg *msg = &flow->msg;
size_t act_index = 1;
- struct tc_skbedit p = {
- .action = TC_ACT_PIPE
- };
+ int i;
- if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
- return -1;
- if (nlattr_nested_start(msg, act_index++) < 0)
+ if (tap_nlattr_nested_start(msg, classifier_action) < 0)
return -1;
- nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("skbedit"), "skbedit");
- if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
- return -1;
- nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS, sizeof(p), &p);
- nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING, queue);
- nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
- nlattr_nested_finish(msg); /* nested act_index */
- nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
+ for (i = 0; i < nb_actions; i++)
+ if (add_action(flow, &act_index, data + i) < 0)
+ return -1;
+ tap_nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
return 0;
}
@@ -984,7 +1046,8 @@ priv_flow_process(struct pmd_internals *pmd,
return -rte_errno;
} else if (flow) {
uint16_t group = attr->group << GROUP_SHIFT;
- uint16_t prio = group | (attr->priority + PRIORITY_OFFSET);
+ uint16_t prio = group | (attr->priority +
+ RSS_PRIORITY_OFFSET + PRIORITY_OFFSET);
flow->msg.t.tcm_info = TC_H_MAKE(prio << 16,
flow->msg.t.tcm_info);
}
@@ -1004,8 +1067,8 @@ priv_flow_process(struct pmd_internals *pmd,
TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
}
/* use flower filter type */
- nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
- if (nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
+ tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
+ if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
goto exit_item_not_supported;
}
for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
@@ -1041,19 +1104,24 @@ priv_flow_process(struct pmd_internals *pmd,
}
if (flow) {
if (data.vlan) {
- nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
+ tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
htons(ETH_P_8021Q));
- nlattr_add16(&flow->msg.nh,
+ tap_nlattr_add16(&flow->msg.nh,
TCA_FLOWER_KEY_VLAN_ETH_TYPE,
data.eth_type ?
data.eth_type : htons(ETH_P_ALL));
} else if (data.eth_type) {
- nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
+ tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
data.eth_type);
}
}
if (mirred && flow) {
- uint16_t if_index = pmd->if_index;
+ struct action_data adata = {
+ .id = "mirred",
+ .mirred = {
+ .eaction = mirred,
+ },
+ };
/*
* If attr->egress && mirred, then this is a special
@@ -1061,9 +1129,13 @@ priv_flow_process(struct pmd_internals *pmd,
* redirect packets coming from the DPDK App, out
* through the remote netdevice.
*/
- if (attr->egress)
- if_index = pmd->remote_if_index;
- if (add_action_mirred(flow, if_index, mirred) < 0)
+ adata.mirred.ifindex = attr->ingress ? pmd->if_index :
+ pmd->remote_if_index;
+ if (mirred == TCA_EGRESS_MIRROR)
+ adata.mirred.action = TC_ACT_PIPE;
+ else
+ adata.mirred.action = TC_ACT_STOLEN;
+ if (add_actions(flow, 1, &adata, TCA_FLOWER_ACT) < 0)
goto exit_action_not_supported;
else
goto end;
@@ -1077,14 +1149,33 @@ priv_flow_process(struct pmd_internals *pmd,
if (action)
goto exit_action_not_supported;
action = 1;
- if (flow)
- err = add_action_gact(flow, TC_ACT_SHOT);
+ if (flow) {
+ struct action_data adata = {
+ .id = "gact",
+ .gact = {
+ .action = TC_ACT_SHOT,
+ },
+ };
+
+ err = add_actions(flow, 1, &adata,
+ TCA_FLOWER_ACT);
+ }
} else if (actions->type == RTE_FLOW_ACTION_TYPE_PASSTHRU) {
if (action)
goto exit_action_not_supported;
action = 1;
- if (flow)
- err = add_action_gact(flow, TC_ACT_UNSPEC);
+ if (flow) {
+ struct action_data adata = {
+ .id = "gact",
+ .gact = {
+ /* continue */
+ .action = TC_ACT_UNSPEC,
+ },
+ };
+
+ err = add_actions(flow, 1, &adata,
+ TCA_FLOWER_ACT);
+ }
} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
@@ -1096,22 +1187,35 @@ priv_flow_process(struct pmd_internals *pmd,
if (!queue ||
(queue->index > pmd->dev->data->nb_rx_queues - 1))
goto exit_action_not_supported;
- if (flow)
- err = add_action_skbedit(flow, queue->index);
+ if (flow) {
+ struct action_data adata = {
+ .id = "skbedit",
+ .skbedit = {
+ .skbedit = {
+ .action = TC_ACT_PIPE,
+ },
+ .queue = queue->index,
+ },
+ };
+
+ err = add_actions(flow, 1, &adata,
+ TCA_FLOWER_ACT);
+ }
} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
- /* Fake RSS support. */
const struct rte_flow_action_rss *rss =
(const struct rte_flow_action_rss *)
actions->conf;
- if (action)
+ if (action++)
goto exit_action_not_supported;
- action = 1;
- if (!rss || rss->num < 1 ||
- (rss->queue[0] > pmd->dev->data->nb_rx_queues - 1))
- goto exit_action_not_supported;
- if (flow)
- err = add_action_skbedit(flow, rss->queue[0]);
+
+ if (!pmd->rss_enabled) {
+ err = rss_enable(pmd, attr, error);
+ if (err)
+ goto exit_action_not_supported;
+ }
+ if (flow && rss)
+ err = rss_add_actions(flow, pmd, rss, error);
} else {
goto exit_action_not_supported;
}
@@ -1120,7 +1224,7 @@ priv_flow_process(struct pmd_internals *pmd,
}
end:
if (flow)
- nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
+ tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
return 0;
exit_item_not_supported:
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1184,6 +1288,38 @@ tap_flow_set_handle(struct rte_flow *flow)
}
/**
+ * Free the flow's opened file descriptors and allocated memory
+ *
+ * @param[in] pmd
+ * Pointer to private structure
+ *
+ * @param[in] flow
+ * Pointer to the flow to free
+ */
+static void
+tap_flow_free(struct pmd_internals *pmd, struct rte_flow *flow)
+{
+ int i;
+
+ if (!flow)
+ return;
+
+ if (pmd->rss_enabled) {
+ /* Close flow BPF file descriptors */
+ for (i = 0; i < SEC_MAX; i++)
+ if (flow->bpf_fd[i] != 0) {
+ close(flow->bpf_fd[i]);
+ flow->bpf_fd[i] = 0;
+ }
+
+ /* Release the map key for this RSS rule */
+ bpf_rss_key(KEY_CMD_RELEASE, &flow->key_idx);
+ flow->key_idx = 0;
+ }
+
+ /* Free flow allocated memory */
+ rte_free(flow);
+}
+
+/**
* Create a flow.
*
* @see rte_flow_create()
@@ -1232,13 +1368,13 @@ tap_flow_create(struct rte_eth_dev *dev,
tap_flow_set_handle(flow);
if (priv_flow_process(pmd, attr, items, actions, error, flow, 0))
goto fail;
- err = nl_send(pmd->nlsk_fd, &msg->nh);
+ err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
if (err < 0) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "couldn't send request to kernel");
goto fail;
}
- err = nl_recv_ack(pmd->nlsk_fd);
+ err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
RTE_LOG(ERR, PMD,
"Kernel refused TC filter rule creation (%d): %s\n",
@@ -1276,14 +1412,14 @@ tap_flow_create(struct rte_eth_dev *dev,
NULL, "rte flow rule validation failed");
goto fail;
}
- err = nl_send(pmd->nlsk_fd, &msg->nh);
+ err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
if (err < 0) {
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "Failure sending nl request");
goto fail;
}
- err = nl_recv_ack(pmd->nlsk_fd);
+ err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
RTE_LOG(ERR, PMD,
"Kernel refused TC filter rule creation (%d): %s\n",
@@ -1301,7 +1437,7 @@ fail:
if (remote_flow)
rte_free(remote_flow);
if (flow)
- rte_free(flow);
+ tap_flow_free(pmd, flow);
return NULL;
}
@@ -1329,13 +1465,13 @@ tap_flow_destroy_pmd(struct pmd_internals *pmd,
flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
- ret = nl_send(pmd->nlsk_fd, &flow->msg.nh);
+ ret = tap_nl_send(pmd->nlsk_fd, &flow->msg.nh);
if (ret < 0) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "couldn't send request to kernel");
goto end;
}
- ret = nl_recv_ack(pmd->nlsk_fd);
+ ret = tap_nl_recv_ack(pmd->nlsk_fd);
/* If errno is ENOENT, the rule is already no longer in the kernel. */
if (ret < 0 && errno == ENOENT)
ret = 0;
@@ -1348,18 +1484,19 @@ tap_flow_destroy_pmd(struct pmd_internals *pmd,
"couldn't receive kernel ack to our request");
goto end;
}
+
if (remote_flow) {
remote_flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
remote_flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
- ret = nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
+ ret = tap_nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
if (ret < 0) {
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "Failure sending nl request");
goto end;
}
- ret = nl_recv_ack(pmd->nlsk_fd);
+ ret = tap_nl_recv_ack(pmd->nlsk_fd);
if (ret < 0 && errno == ENOENT)
ret = 0;
if (ret < 0) {
@@ -1375,7 +1512,7 @@ tap_flow_destroy_pmd(struct pmd_internals *pmd,
end:
if (remote_flow)
rte_free(remote_flow);
- rte_free(flow);
+ tap_flow_free(pmd, flow);
return ret;
}
@@ -1556,9 +1693,15 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
* The ISOLATE rule is always present and must have a static handle, as
* the action is changed whether the feature is enabled (DROP) or
* disabled (PASSTHRU).
+	 * There is just one REMOTE_PROMISCUOUS rule in all cases. It should
+	 * have a static handle so that adding it twice fails with EEXIST on
+	 * any kernel version. Remark: old kernels may falsely accept two
+	 * identical REMOTE_PROMISCUOUS rules if they have different handles.
*/
if (idx == TAP_ISOLATE)
remote_flow->msg.t.tcm_handle = ISOLATE_HANDLE;
+ else if (idx == TAP_REMOTE_PROMISC)
+ remote_flow->msg.t.tcm_handle = REMOTE_PROMISCUOUS_HANDLE;
else
tap_flow_set_handle(remote_flow);
if (priv_flow_process(pmd, attr, items, actions, NULL,
@@ -1566,19 +1709,23 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
goto fail;
}
- err = nl_send(pmd->nlsk_fd, &msg->nh);
+ err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
if (err < 0) {
RTE_LOG(ERR, PMD, "Failure sending nl request\n");
goto fail;
}
- err = nl_recv_ack(pmd->nlsk_fd);
+ err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
+ /* Silently ignore re-entering remote promiscuous rule */
+ if (errno == EEXIST && idx == TAP_REMOTE_PROMISC)
+ goto success;
RTE_LOG(ERR, PMD,
"Kernel refused TC filter rule creation (%d): %s\n",
errno, strerror(errno));
goto fail;
}
LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
+success:
return 0;
fail:
if (remote_flow)
@@ -1632,6 +1779,346 @@ tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
return 0;
}
+#define MAX_RSS_KEYS 256
+#define KEY_IDX_OFFSET (3 * MAX_RSS_KEYS)
+#define SEC_NAME_CLS_Q "cls_q"
+
+const char *sec_name[SEC_MAX] = {
+ [SEC_L3_L4] = "l3_l4",
+};
+
+/**
+ * Enable RSS on tap: create TC rules for queuing.
+ *
+ * @param[in, out] pmd
+ * Pointer to private structure.
+ *
+ * @param[in] attr
+ * Pointer to the flow attributes, used to get the flow group
+ *
+ * @param[out] error
+ * Pointer to error reporting if not NULL.
+ *
+ * @return 0 on success, negative value on failure.
+ */
+static int rss_enable(struct pmd_internals *pmd,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *rss_flow = NULL;
+ struct nlmsg *msg = NULL;
+ /* 4096 is the maximum number of instructions for a BPF program */
+ char annotation[64];
+ int i;
+ int err = 0;
+
+ /* unlimit locked memory */
+ struct rlimit memlock_limit = {
+ .rlim_cur = RLIM_INFINITY,
+ .rlim_max = RLIM_INFINITY,
+ };
+ setrlimit(RLIMIT_MEMLOCK, &memlock_limit);
+
+ /* Get a new map key for a new RSS rule */
+ err = bpf_rss_key(KEY_CMD_INIT, NULL);
+ if (err < 0) {
+ rte_flow_error_set(
+ error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to initialize BPF RSS keys");
+
+ return -1;
+ }
+
+ /*
+ * Create BPF RSS MAP
+ */
+ pmd->map_fd = tap_flow_bpf_rss_map_create(sizeof(__u32), /* key size */
+ sizeof(struct rss_key),
+ MAX_RSS_KEYS);
+ if (pmd->map_fd < 0) {
+ RTE_LOG(ERR, PMD,
+ "Failed to create BPF map (%d): %s\n",
+ errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Kernel too old or not configured "
+ "to support BPF maps");
+
+ return -ENOTSUP;
+ }
+
+ /*
+ * Add a rule per queue to match reclassified packets and direct them to
+ * the correct queue.
+ */
+ for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
+ pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
+ if (pmd->bpf_fd[i] < 0) {
+ RTE_LOG(ERR, PMD,
+				"Failed to load BPF section %s for queue %d\n",
+ SEC_NAME_CLS_Q, i);
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Kernel too old or not configured "
+ "to support BPF programs loading");
+
+ return -ENOTSUP;
+ }
+
+ rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ if (!rss_flow) {
+ RTE_LOG(ERR, PMD,
+				"Cannot allocate memory for rte_flow\n");
+ return -1;
+ }
+ msg = &rss_flow->msg;
+ tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER, NLM_F_REQUEST |
+ NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
+ tap_flow_set_handle(rss_flow);
+ uint16_t group = attr->group << GROUP_SHIFT;
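+		/*
+		 * Per-queue rules get priorities i + PRIORITY_OFFSET, which
+		 * are numerically lower than the RSS flow rule priorities
+		 * (those also add RSS_PRIORITY_OFFSET), so reclassified
+		 * packets are matched by these cls_q rules first.
+		 */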
+ uint16_t prio = group | (i + PRIORITY_OFFSET);
+ msg->t.tcm_info = TC_H_MAKE(prio << 16, msg->t.tcm_info);
+ msg->t.tcm_parent = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
+
+ tap_nlattr_add(&msg->nh, TCA_KIND, sizeof("bpf"), "bpf");
+ if (tap_nlattr_nested_start(msg, TCA_OPTIONS) < 0)
+ return -1;
+ tap_nlattr_add32(&msg->nh, TCA_BPF_FD, pmd->bpf_fd[i]);
+ snprintf(annotation, sizeof(annotation), "[%s%d]",
+ SEC_NAME_CLS_Q, i);
+ tap_nlattr_add(&msg->nh, TCA_BPF_NAME, strlen(annotation) + 1,
+ annotation);
+ /* Actions */
+ {
+ struct action_data adata = {
+ .id = "skbedit",
+ .skbedit = {
+ .skbedit = {
+ .action = TC_ACT_PIPE,
+ },
+ .queue = i,
+ },
+ };
+ if (add_actions(rss_flow, 1, &adata, TCA_BPF_ACT) < 0)
+ return -1;
+ }
+ tap_nlattr_nested_finish(msg); /* nested TCA_OPTIONS */
+
+ /* Netlink message is now ready to be sent */
+ if (tap_nl_send(pmd->nlsk_fd, &msg->nh) < 0)
+ return -1;
+ err = tap_nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule creation (%d): %s\n",
+ errno, strerror(errno));
+ return err;
+ }
+ LIST_INSERT_HEAD(&pmd->rss_flows, rss_flow, next);
+ }
+
+ pmd->rss_enabled = 1;
+ return err;
+}
+
+/**
+ * Manage the BPF RSS key repository with operations: init, get, release,
+ * deinit
+ *
+ * @param[in] cmd
+ * Command on RSS keys: init, get, release, deinit
+ *
+ * @param[in, out] key_idx
+ * Pointer to RSS Key index (out for get command, in for release command)
+ *
+ * @return -1 if couldn't get, release or init the RSS keys, 0 otherwise.
+ */
+static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
+{
+ __u32 i;
+ int err = 0;
+ static __u32 num_used_keys;
+ static __u32 rss_keys[MAX_RSS_KEYS] = {KEY_STAT_UNSPEC};
+ static __u32 rss_keys_initialized;
+ __u32 key;
+
+ switch (cmd) {
+ case KEY_CMD_GET:
+ if (!rss_keys_initialized) {
+ err = -1;
+ break;
+ }
+
+ if (num_used_keys == RTE_DIM(rss_keys)) {
+ err = -1;
+ break;
+ }
+
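+		/*
+		 * Start probing at num_used_keys and scan circularly for the
+		 * first free slot; the check above guarantees one exists.
+		 */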
+ *key_idx = num_used_keys % RTE_DIM(rss_keys);
+ while (rss_keys[*key_idx] == KEY_STAT_USED)
+ *key_idx = (*key_idx + 1) % RTE_DIM(rss_keys);
+
+ rss_keys[*key_idx] = KEY_STAT_USED;
+
+ /*
+ * Add an offset to key_idx in order to handle a case of
+ * RSS and non RSS flows mixture.
+ * If a non RSS flow is destroyed it has an eBPF map
+ * index 0 (initialized on flow creation) and might
+ * unintentionally remove RSS entry 0 from eBPF map.
+ * To avoid this issue, add an offset to the real index
+ * during a KEY_CMD_GET operation and subtract this offset
+ * during a KEY_CMD_RELEASE operation in order to restore
+ * the real index.
+ */
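+		/* e.g. with MAX_RSS_KEYS == 256, KEY_IDX_OFFSET is 768 and
+		 * the returned indices fall in the range 768..1023 */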
+ *key_idx += KEY_IDX_OFFSET;
+ num_used_keys++;
+ break;
+
+ case KEY_CMD_RELEASE:
+ if (!rss_keys_initialized)
+ break;
+
+ /*
+		 * Subtract the offset to restore the real key index.
+ * If a non RSS flow is falsely trying to release map
+ * entry 0 - the offset subtraction will calculate the real
+ * map index as an out-of-range value and the release operation
+ * will be silently ignored.
+ */
+ key = *key_idx - KEY_IDX_OFFSET;
+ if (key >= RTE_DIM(rss_keys))
+ break;
+
+ if (rss_keys[key] == KEY_STAT_USED) {
+ rss_keys[key] = KEY_STAT_AVAILABLE;
+ num_used_keys--;
+ }
+ break;
+
+ case KEY_CMD_INIT:
+ for (i = 0; i < RTE_DIM(rss_keys); i++)
+ rss_keys[i] = KEY_STAT_AVAILABLE;
+
+ rss_keys_initialized = 1;
+ num_used_keys = 0;
+ break;
+
+ case KEY_CMD_DEINIT:
+ for (i = 0; i < RTE_DIM(rss_keys); i++)
+ rss_keys[i] = KEY_STAT_UNSPEC;
+
+ rss_keys_initialized = 0;
+ num_used_keys = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * Add RSS hash calculations and queue selection
+ *
+ * @param[in, out] flow
+ * Pointer to the flow holding the BPF fds and the RSS map key index
+ *
+ * @param[in, out] pmd
+ * Pointer to internal structure. Used to set/get the RSS map fd
+ *
+ * @param[in] rss
+ * Pointer to RSS flow actions
+ *
+ * @param[out] error
+ * Pointer to error reporting if not NULL.
+ *
+ * @return 0 on success, negative value on failure
+ */
+static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
+ const struct rte_flow_action_rss *rss,
+ struct rte_flow_error *error)
+{
+ /* 4096 is the maximum number of instructions for a BPF program */
+ int i;
+ int err;
+ struct rss_key rss_entry = { .hash_fields = 0,
+ .key_size = 0 };
+
+ /* Get a new map key for a new RSS rule */
+ err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx);
+ if (err < 0) {
+ rte_flow_error_set(
+ error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to get BPF RSS key");
+
+ return -1;
+ }
+
+ /* Update RSS map entry with queues */
+ rss_entry.nb_queues = rss->num;
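+	/* rss->num is assumed not to exceed TAP_MAX_QUEUES, the size of
+	 * rss_entry.queues[] */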
+ for (i = 0; i < rss->num; i++)
+ rss_entry.queues[i] = rss->queue[i];
+ rss_entry.hash_fields =
+ (1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);
+
+ /* Add this RSS entry to map */
+ err = tap_flow_bpf_update_rss_elem(pmd->map_fd,
+ &flow->key_idx, &rss_entry);
+
+ if (err) {
+ RTE_LOG(ERR, PMD,
+ "Failed to update BPF map entry #%u (%d): %s\n",
+ flow->key_idx, errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Kernel too old or not configured "
+ "to support BPF maps updates");
+
+ return -ENOTSUP;
+ }
+
+ /*
+ * Load bpf rules to calculate hash for this key_idx
+ */
+
+ flow->bpf_fd[SEC_L3_L4] =
+ tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd);
+ if (flow->bpf_fd[SEC_L3_L4] < 0) {
+ RTE_LOG(ERR, PMD,
+ "Failed to load BPF section %s (%d): %s\n",
+ sec_name[SEC_L3_L4], errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Kernel too old or not configured "
+ "to support BPF program loading");
+
+ return -ENOTSUP;
+ }
+
+ /* Actions */
+ {
+ struct action_data adata[] = {
+ {
+ .id = "bpf",
+ .bpf = {
+ .bpf_fd = flow->bpf_fd[SEC_L3_L4],
+ .annotation = sec_name[SEC_L3_L4],
+ .bpf = {
+ .action = TC_ACT_PIPE,
+ },
+ },
+ },
+ };
+
+ if (add_actions(flow, RTE_DIM(adata), adata,
+ TCA_FLOWER_ACT) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
/**
* Manage filter operations.
*
diff --git a/drivers/net/tap/tap_flow.h b/drivers/net/tap/tap_flow.h
index 9e332b03..ac6a952d 100644
--- a/drivers/net/tap/tap_flow.h
+++ b/drivers/net/tap/tap_flow.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#ifndef _TAP_FLOW_H_
@@ -37,6 +9,7 @@
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_eth_tap.h>
+#include <tap_autoconf.h>
/**
* In TC, priority 0 means we require the kernel to allocate one for us.
@@ -49,6 +22,7 @@
#define GROUP_MASK (0xf)
#define GROUP_SHIFT 12
#define MAX_GROUP GROUP_MASK
+#define RSS_PRIORITY_OFFSET RTE_PMD_TAP_MAX_QUEUES
/**
 * These indices are actually in reversed order: their priority is processed
@@ -67,6 +41,11 @@ enum implicit_rule_index {
TAP_REMOTE_MAX_IDX,
};
+enum bpf_fd_idx {
+ SEC_L3_L4,
+ SEC_MAX,
+};
+
int tap_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
@@ -80,4 +59,10 @@ int tap_flow_implicit_destroy(struct pmd_internals *pmd,
int tap_flow_implicit_flush(struct pmd_internals *pmd,
struct rte_flow_error *error);
+int tap_flow_bpf_cls_q(__u32 queue_idx);
+int tap_flow_bpf_calc_l3_l4_hash(__u32 key_idx, int map_fd);
+int tap_flow_bpf_rss_map_create(unsigned int key_size, unsigned int value_size,
+ unsigned int max_entries);
+int tap_flow_bpf_update_rss_elem(int fd, void *key, void *value);
+
#endif /* _TAP_FLOW_H_ */
diff --git a/drivers/net/tap/tap_intr.c b/drivers/net/tap/tap_intr.c
new file mode 100644
index 00000000..b0e19914
--- /dev/null
+++ b/drivers/net/tap/tap_intr.c
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd.
+ */
+
+/**
+ * @file
+ * Interrupts handling for tap driver.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <rte_eth_tap.h>
+#include <rte_errno.h>
+#include <rte_interrupts.h>
+
+
+/**
+ * Unregister Rx interrupts and free the queue interrupt vector.
+ *
+ * @param dev
+ * Pointer to the tap rte_eth_dev structure.
+ */
+static void
+tap_rx_intr_vec_uninstall(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct rte_intr_handle *intr_handle = &pmd->intr_handle;
+
+ rte_intr_free_epoll_fd(intr_handle);
+ free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ intr_handle->nb_efd = 0;
+}
+
+/**
+ * Allocate Rx queue interrupt vector and register Rx interrupts.
+ *
+ * @param dev
+ * Pointer to the tap rte_eth_dev device structure.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static int
+tap_rx_intr_vec_install(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ unsigned int rxqs_n = pmd->dev->data->nb_rx_queues;
+ struct rte_intr_handle *intr_handle = &pmd->intr_handle;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ unsigned int i;
+ unsigned int count = 0;
+
+ if (!dev->data->dev_conf.intr_conf.rxq)
+ return 0;
+	/* allocate one vector entry per Rx queue */
+	intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[0]) *
+				       rxqs_n);
+ if (intr_handle->intr_vec == NULL) {
+ rte_errno = ENOMEM;
+ RTE_LOG(ERR, PMD,
+ "failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported");
+ return -rte_errno;
+ }
+ for (i = 0; i < n; i++) {
+ struct rx_queue *rxq = pmd->dev->data->rx_queues[i];
+
+ /* Skip queues that cannot request interrupts. */
+ if (!rxq || rxq->fd <= 0) {
+ /* Use invalid intr_vec[] index to disable entry. */
+ intr_handle->intr_vec[i] =
+ RTE_INTR_VEC_RXTX_OFFSET +
+ RTE_MAX_RXTX_INTR_VEC_ID;
+ continue;
+ }
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+ intr_handle->efds[count] = rxq->fd;
+ count++;
+ }
+ if (!count)
+ tap_rx_intr_vec_uninstall(dev);
+ else
+ intr_handle->nb_efd = count;
+ return 0;
+}
+
+/**
+ * Register or unregister the Rx interrupts.
+ *
+ * @param dev
+ * Pointer to the tap rte_eth_dev device structure.
+ * @param set
+ * Nonzero to register the interrupts, zero to unregister them.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+tap_rx_intr_vec_set(struct rte_eth_dev *dev, int set)
+{
+ tap_rx_intr_vec_uninstall(dev);
+ if (set)
+ return tap_rx_intr_vec_install(dev);
+ return 0;
+}
diff --git a/drivers/net/tap/tap_netlink.c b/drivers/net/tap/tap_netlink.c
index ee92e2e7..82c8dc0e 100644
--- a/drivers/net/tap/tap_netlink.c
+++ b/drivers/net/tap/tap_netlink.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#include <errno.h>
@@ -63,7 +35,7 @@ struct nested_tail {
* netlink socket file descriptor on success, -1 otherwise.
*/
int
-nl_init(uint32_t nl_groups)
+tap_nl_init(uint32_t nl_groups)
{
int fd, sndbuf_size = SNDBUF_SIZE, rcvbuf_size = RCVBUF_SIZE;
struct sockaddr_nl local = {
@@ -101,7 +73,7 @@ nl_init(uint32_t nl_groups)
* 0 on success, -1 otherwise.
*/
int
-nl_final(int nlsk_fd)
+tap_nl_final(int nlsk_fd)
{
if (close(nlsk_fd)) {
RTE_LOG(ERR, PMD, "Failed to close netlink socket: %s (%d)\n",
@@ -123,7 +95,7 @@ nl_final(int nlsk_fd)
* the number of sent bytes on success, -1 otherwise.
*/
int
-nl_send(int nlsk_fd, struct nlmsghdr *nh)
+tap_nl_send(int nlsk_fd, struct nlmsghdr *nh)
{
/* man 7 netlink EXAMPLE */
struct sockaddr_nl sa = {
@@ -153,7 +125,8 @@ nl_send(int nlsk_fd, struct nlmsghdr *nh)
}
/**
- * Check that the kernel sends an appropriate ACK in response to an nl_send().
+ * Check that the kernel sends an appropriate ACK in response
+ * to a tap_nl_send().
*
* @param[in] nlsk_fd
* The netlink socket file descriptor used for communication.
@@ -162,14 +135,14 @@ nl_send(int nlsk_fd, struct nlmsghdr *nh)
* 0 on success, -1 otherwise with errno set.
*/
int
-nl_recv_ack(int nlsk_fd)
+tap_nl_recv_ack(int nlsk_fd)
{
- return nl_recv(nlsk_fd, NULL, NULL);
+ return tap_nl_recv(nlsk_fd, NULL, NULL);
}
/**
 * Receive a message from the kernel on the netlink socket, following a
- * nl_send().
+ * tap_nl_send().
*
* @param[in] nlsk_fd
* The netlink socket file descriptor used for communication.
@@ -182,7 +155,7 @@ nl_recv_ack(int nlsk_fd)
* 0 on success, -1 otherwise with errno set.
*/
int
-nl_recv(int nlsk_fd, int (*cb)(struct nlmsghdr *, void *arg), void *arg)
+tap_nl_recv(int nlsk_fd, int (*cb)(struct nlmsghdr *, void *arg), void *arg)
{
/* man 7 netlink EXAMPLE */
struct sockaddr_nl sa;
@@ -247,7 +220,7 @@ nl_recv(int nlsk_fd, int (*cb)(struct nlmsghdr *, void *arg), void *arg)
* The data to append.
*/
void
-nlattr_add(struct nlmsghdr *nh, unsigned short type,
+tap_nlattr_add(struct nlmsghdr *nh, unsigned short type,
unsigned int data_len, const void *data)
{
/* see man 3 rtnetlink */
@@ -271,9 +244,9 @@ nlattr_add(struct nlmsghdr *nh, unsigned short type,
* The data to append.
*/
void
-nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data)
+tap_nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data)
{
- nlattr_add(nh, type, sizeof(uint8_t), &data);
+ tap_nlattr_add(nh, type, sizeof(uint8_t), &data);
}
/**
@@ -287,9 +260,9 @@ nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data)
* The data to append.
*/
void
-nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data)
+tap_nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data)
{
- nlattr_add(nh, type, sizeof(uint16_t), &data);
+ tap_nlattr_add(nh, type, sizeof(uint16_t), &data);
}
/**
@@ -303,14 +276,14 @@ nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data)
* The data to append.
*/
void
-nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data)
+tap_nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data)
{
- nlattr_add(nh, type, sizeof(uint32_t), &data);
+ tap_nlattr_add(nh, type, sizeof(uint32_t), &data);
}
/**
* Start a nested netlink attribute.
- * It must be followed later by a call to nlattr_nested_finish().
+ * It must be followed later by a call to tap_nlattr_nested_finish().
*
* @param[in, out] msg
* The netlink message where to edit the nested_tails metadata.
@@ -321,7 +294,7 @@ nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data)
* -1 if adding a nested netlink attribute failed, 0 otherwise.
*/
int
-nlattr_nested_start(struct nlmsg *msg, uint16_t type)
+tap_nlattr_nested_start(struct nlmsg *msg, uint16_t type)
{
struct nested_tail *tail;
@@ -335,7 +308,7 @@ nlattr_nested_start(struct nlmsg *msg, uint16_t type)
tail->tail = (struct rtattr *)NLMSG_TAIL(&msg->nh);
- nlattr_add(&msg->nh, type, 0, NULL);
+ tap_nlattr_add(&msg->nh, type, 0, NULL);
tail->prev = msg->nested_tails;
@@ -346,7 +319,7 @@ nlattr_nested_start(struct nlmsg *msg, uint16_t type)
/**
* End a nested netlink attribute.
- * It follows a call to nlattr_nested_start().
+ * It follows a call to tap_nlattr_nested_start().
 * In effect, it will modify the nested attribute length to include every byte
* from the nested attribute start, up to here.
*
@@ -354,7 +327,7 @@ nlattr_nested_start(struct nlmsg *msg, uint16_t type)
* The netlink message where to edit the nested_tails metadata.
*/
void
-nlattr_nested_finish(struct nlmsg *msg)
+tap_nlattr_nested_finish(struct nlmsg *msg)
{
struct nested_tail *tail = msg->nested_tails;
diff --git a/drivers/net/tap/tap_netlink.h b/drivers/net/tap/tap_netlink.h
index 98e13902..fafef840 100644
--- a/drivers/net/tap/tap_netlink.h
+++ b/drivers/net/tap/tap_netlink.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#ifndef _TAP_NETLINK_H_
@@ -53,17 +25,18 @@ struct nlmsg {
#define NLMSG_TAIL(nlh) (void *)((char *)(nlh) + NLMSG_ALIGN((nlh)->nlmsg_len))
-int nl_init(uint32_t nl_groups);
-int nl_final(int nlsk_fd);
-int nl_send(int nlsk_fd, struct nlmsghdr *nh);
-int nl_recv(int nlsk_fd, int (*callback)(struct nlmsghdr *, void *), void *arg);
-int nl_recv_ack(int nlsk_fd);
-void nlattr_add(struct nlmsghdr *nh, unsigned short type,
- unsigned int data_len, const void *data);
-void nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data);
-void nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data);
-void nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data);
-int nlattr_nested_start(struct nlmsg *msg, uint16_t type);
-void nlattr_nested_finish(struct nlmsg *msg);
+int tap_nl_init(uint32_t nl_groups);
+int tap_nl_final(int nlsk_fd);
+int tap_nl_send(int nlsk_fd, struct nlmsghdr *nh);
+int tap_nl_recv(int nlsk_fd, int (*callback)(struct nlmsghdr *, void *),
+ void *arg);
+int tap_nl_recv_ack(int nlsk_fd);
+void tap_nlattr_add(struct nlmsghdr *nh, unsigned short type,
+ unsigned int data_len, const void *data);
+void tap_nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data);
+void tap_nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data);
+void tap_nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data);
+int tap_nlattr_nested_start(struct nlmsg *msg, uint16_t type);
+void tap_nlattr_nested_finish(struct nlmsg *msg);
#endif /* _TAP_NETLINK_H_ */
diff --git a/drivers/net/tap/tap_rss.h b/drivers/net/tap/tap_rss.h
new file mode 100644
index 00000000..3bb0d140
--- /dev/null
+++ b/drivers/net/tap/tap_rss.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Mellanox Technologies, Ltd.
+ */
+
+#ifndef _TAP_RSS_H_
+#define _TAP_RSS_H_
+
+#ifndef TAP_MAX_QUEUES
+#define TAP_MAX_QUEUES 16
+#endif
+
+/* hashed fields for RSS */
+enum hash_field {
+ HASH_FIELD_IPV4_L3, /* IPv4 src/dst addr */
+ HASH_FIELD_IPV4_L3_L4, /* IPv4 src/dst addr + L4 src/dst ports */
+ HASH_FIELD_IPV6_L3, /* IPv6 src/dst addr */
+ HASH_FIELD_IPV6_L3_L4, /* IPv6 src/dst addr + L4 src/dst ports */
+ HASH_FIELD_L2_SRC, /* Ethernet src addr */
+ HASH_FIELD_L2_DST, /* Ethernet dst addr */
+ HASH_FIELD_L3_SRC, /* L3 src addr */
+ HASH_FIELD_L3_DST, /* L3 dst addr */
+ HASH_FIELD_L4_SRC, /* TCP/UDP src ports */
+ HASH_FIELD_L4_DST, /* TCP/UDP dst ports */
+};
+
+struct rss_key {
+ __u8 key[128];
+ __u32 hash_fields;
+ __u32 key_size;
+ __u32 queues[TAP_MAX_QUEUES];
+ __u32 nb_queues;
+} __attribute__((packed));
+
+#endif /* _TAP_RSS_H_ */
diff --git a/drivers/net/tap/tap_tcmsgs.c b/drivers/net/tap/tap_tcmsgs.c
index d74ac805..954f13eb 100644
--- a/drivers/net/tap/tap_tcmsgs.c
+++ b/drivers/net/tap/tap_tcmsgs.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#include <inttypes.h>
@@ -107,7 +79,7 @@ qdisc_del(int nlsk_fd, uint16_t ifindex, struct qdisc *qinfo)
msg.t.tcm_parent = qinfo->parent;
/* if no netlink socket is provided, create one */
if (!nlsk_fd) {
- fd = nl_init(0);
+ fd = tap_nl_init(0);
if (fd < 0) {
RTE_LOG(ERR, PMD,
"Could not delete QDISC: null netlink socket\n");
@@ -116,16 +88,16 @@ qdisc_del(int nlsk_fd, uint16_t ifindex, struct qdisc *qinfo)
} else {
fd = nlsk_fd;
}
- if (nl_send(fd, &msg.nh) < 0)
+ if (tap_nl_send(fd, &msg.nh) < 0)
goto error;
- if (nl_recv_ack(fd) < 0)
+ if (tap_nl_recv_ack(fd) < 0)
goto error;
if (!nlsk_fd)
- return nl_final(fd);
+ return tap_nl_final(fd);
return 0;
error:
if (!nlsk_fd)
- nl_final(fd);
+ tap_nl_final(fd);
return -1;
}
@@ -150,11 +122,11 @@ qdisc_add_multiq(int nlsk_fd, uint16_t ifindex)
NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
msg.t.tcm_handle = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
msg.t.tcm_parent = TC_H_ROOT;
- nlattr_add(&msg.nh, TCA_KIND, sizeof("multiq"), "multiq");
- nlattr_add(&msg.nh, TCA_OPTIONS, sizeof(opt), &opt);
- if (nl_send(nlsk_fd, &msg.nh) < 0)
+ tap_nlattr_add(&msg.nh, TCA_KIND, sizeof("multiq"), "multiq");
+ tap_nlattr_add(&msg.nh, TCA_OPTIONS, sizeof(opt), &opt);
+ if (tap_nl_send(nlsk_fd, &msg.nh) < 0)
return -1;
- if (nl_recv_ack(nlsk_fd) < 0)
+ if (tap_nl_recv_ack(nlsk_fd) < 0)
return -1;
return 0;
}
@@ -179,10 +151,10 @@ qdisc_add_ingress(int nlsk_fd, uint16_t ifindex)
NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
msg.t.tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
msg.t.tcm_parent = TC_H_INGRESS;
- nlattr_add(&msg.nh, TCA_KIND, sizeof("ingress"), "ingress");
- if (nl_send(nlsk_fd, &msg.nh) < 0)
+ tap_nlattr_add(&msg.nh, TCA_KIND, sizeof("ingress"), "ingress");
+ if (tap_nl_send(nlsk_fd, &msg.nh) < 0)
return -1;
- if (nl_recv_ack(nlsk_fd) < 0)
+ if (tap_nl_recv_ack(nlsk_fd) < 0)
return -1;
return 0;
}
@@ -246,9 +218,9 @@ qdisc_iterate(int nlsk_fd, uint16_t ifindex,
};
tc_init_msg(&msg, ifindex, RTM_GETQDISC, NLM_F_REQUEST | NLM_F_DUMP);
- if (nl_send(nlsk_fd, &msg.nh) < 0)
+ if (tap_nl_send(nlsk_fd, &msg.nh) < 0)
return -1;
- if (nl_recv(nlsk_fd, callback, &args) < 0)
+ if (tap_nl_recv(nlsk_fd, callback, &args) < 0)
return -1;
return 0;
}
diff --git a/drivers/net/tap/tap_tcmsgs.h b/drivers/net/tap/tap_tcmsgs.h
index 78959577..f72f8c5c 100644
--- a/drivers/net/tap/tap_tcmsgs.h
+++ b/drivers/net/tap/tap_tcmsgs.h
@@ -1,39 +1,12 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*/
#ifndef _TAP_TCMSGS_H_
#define _TAP_TCMSGS_H_
+#include <tap_autoconf.h>
#include <linux/if_ether.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
@@ -41,6 +14,9 @@
#include <linux/tc_act/tc_mirred.h>
#include <linux/tc_act/tc_gact.h>
#include <linux/tc_act/tc_skbedit.h>
+#ifdef HAVE_TC_ACT_BPF
+#include <linux/tc_act/tc_bpf.h>
+#endif
#include <inttypes.h>
#include <rte_ether.h>
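
tap_autoconf.h, newly included above, is generated at build time: the tap
Makefile probes the installed kernel headers and emits HAVE_* defines, so
optional kernel interfaces such as linux/tc_act/tc_bpf.h can be pulled in
conditionally, as the HAVE_TC_ACT_BPF guard shows. A hedged sketch of what
the generated header might contain on a kernel that provides TC BPF actions
(contents illustrative, not taken from the patch):

	/* tap_autoconf.h: generated at build time (illustrative content) */
	#ifndef HAVE_TC_ACT_BPF
	#define HAVE_TC_ACT_BPF 1
	#endif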
diff --git a/drivers/net/thunderx/Makefile b/drivers/net/thunderx/Makefile
index e50e1ad8..e6bf4975 100644
--- a/drivers/net/thunderx/Makefile
+++ b/drivers/net/thunderx/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016 Cavium, Inc. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Cavium, Inc nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Cavium, Inc
#
include $(RTE_SDK)/mk/rte.vars.mk
@@ -44,7 +16,7 @@ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
-EXPORT_MAP := rte_pmd_thunderx_nicvf_version.map
+EXPORT_MAP := rte_pmd_thunderx_version.map
LIBABIVER := 1
diff --git a/drivers/net/thunderx/base/meson.build b/drivers/net/thunderx/base/meson.build
new file mode 100644
index 00000000..c9d5a8f4
--- /dev/null
+++ b/drivers/net/thunderx/base/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = [
+ 'nicvf_hw.c',
+ 'nicvf_mbox.c',
+ 'nicvf_bsvf.c'
+]
+
+base_lib = static_library('nicvf_base', sources,
+ c_args: cflags,
+ dependencies: static_rte_ethdev
+)
+
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.c b/drivers/net/thunderx/base/nicvf_bsvf.c
index a4fc04cb..df8c016a 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.c
+++ b/drivers/net/thunderx/base/nicvf_bsvf.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#include <assert.h>
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.h b/drivers/net/thunderx/base/nicvf_bsvf.h
index 2d9448aa..4c7615ca 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.h
+++ b/drivers/net/thunderx/base/nicvf_bsvf.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#ifndef __THUNDERX_NICVF_BSVF_H__
diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index dc0af1ca..ea8092ca 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#include <unistd.h>
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
index 698aa487..284d0bdf 100644
--- a/drivers/net/thunderx/base/nicvf_hw.h
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#ifndef _THUNDERX_NICVF_HW_H
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
index e7e092b6..b13c21ff 100644
--- a/drivers/net/thunderx/base/nicvf_hw_defs.h
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#ifndef _THUNDERX_NICVF_HW_DEFS_H
diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c
index e26319bb..8f83d41d 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.c
+++ b/drivers/net/thunderx/base/nicvf_mbox.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#include <assert.h>
diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h
index 6724ae1c..81f1f408 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.h
+++ b/drivers/net/thunderx/base/nicvf_mbox.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#ifndef __THUNDERX_NICVF_MBOX__
diff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h
index f821c56f..6de07c70 100644
--- a/drivers/net/thunderx/base/nicvf_plat.h
+++ b/drivers/net/thunderx/base/nicvf_plat.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#ifndef _THUNDERX_NICVF_H
diff --git a/drivers/net/thunderx/meson.build b/drivers/net/thunderx/meson.build
new file mode 100644
index 00000000..69819a97
--- /dev/null
+++ b/drivers/net/thunderx/meson.build
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+subdir('base')
+objs = [base_objs]
+
+sources = files('nicvf_rxtx.c',
+ 'nicvf_ethdev.c',
+ 'nicvf_svf.c'
+)
+
+if cc.has_argument('-fno-prefetch-loop-arrays')
+ cflags += '-fno-prefetch-loop-arrays'
+endif
+
+if cc.has_argument('-Wno-maybe-uninitialized')
+ cflags += '-Wno-maybe-uninitialized'
+endif
+
+includes += include_directories('base')
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index d65d3cee..a65361fb 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#include <assert.h>
@@ -52,7 +24,7 @@
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
@@ -71,11 +43,32 @@
#include "nicvf_svf.h"
#include "nicvf_logs.h"
+int nicvf_logtype_mbox;
+int nicvf_logtype_init;
+int nicvf_logtype_driver;
+
static void nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
bool cleanup);
+RTE_INIT(nicvf_init_log);
+static void
+nicvf_init_log(void)
+{
+ nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
+ if (nicvf_logtype_mbox >= 0)
+ rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
+
+ nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
+ if (nicvf_logtype_init >= 0)
+ rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
+
+ nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
+ if (nicvf_logtype_driver >= 0)
+ rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
+}
+
static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
struct rte_eth_link *link)
@@ -100,7 +93,7 @@ nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
else if (nic->duplex == NICVF_FULL_DUPLEX)
link->link_duplex = ETH_LINK_FULL_DUPLEX;
link->link_speed = nic->speed;
- link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
+ link->link_autoneg = ETH_LINK_AUTONEG;
}
static void
@@ -113,7 +106,7 @@ nicvf_interrupt(void *arg)
if (dev->data->dev_conf.intr_conf.lsc)
nicvf_set_eth_link_status(nic, &dev->data->dev_link);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
@@ -179,6 +172,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
struct nicvf *nic = nicvf_pmd_priv(dev);
uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
size_t i;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
PMD_INIT_FUNC_TRACE();
@@ -204,15 +198,15 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
if (frame_size > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
return -EINVAL;
/* Update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
+ rxmode->max_rx_pkt_len = (uint32_t)frame_size;
nic->mtu = mtu;
for (i = 0; i < nic->sqs_count; i++)
@@ -903,7 +897,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
+ if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
multiseg = true;
break;
}
@@ -942,9 +936,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
const struct rte_eth_txconf *tx_conf)
{
uint16_t tx_free_thresh;
- uint8_t is_single_pool;
+ bool is_single_pool;
struct nicvf_txq *txq;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint64_t conf_offloads, offload_capa, unsupported_offloads;
PMD_INIT_FUNC_TRACE();
@@ -958,6 +953,17 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
+ conf_offloads = tx_conf->offloads;
+ offload_capa = NICVF_TX_OFFLOAD_CAPA;
+
+ unsupported_offloads = conf_offloads & ~offload_capa;
+ if (unsupported_offloads) {
+		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported. "
+ "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
+ unsupported_offloads, conf_offloads, offload_capa);
+ return -ENOTSUP;
+ }
+
/* Tx deferred start is not supported */
if (tx_conf->tx_deferred_start) {
PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1007,11 +1013,11 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
txq->nic = nic;
txq->queue_id = qidx;
txq->tx_free_thresh = tx_free_thresh;
- txq->txq_flags = tx_conf->txq_flags;
txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
- is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
- txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
+ txq->offloads = conf_offloads;
+
+ is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
/* Choose optimum free threshold value for multipool case */
if (!is_single_pool) {
@@ -1042,9 +1048,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
nicvf_tx_queue_reset(txq);
- PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
+ PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
+ " phys=0x%" PRIx64 " offloads=0x%" PRIx64,
nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
- txq->phys);
+ txq->phys, txq->offloads);
dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1270,6 +1277,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t rx_free_thresh;
struct nicvf_rxq *rxq;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint64_t conf_offloads, offload_capa, unsupported_offloads;
PMD_INIT_FUNC_TRACE();
@@ -1283,6 +1291,24 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
+
+ conf_offloads = rx_conf->offloads;
+
+ if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
+ PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
+ conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
+ }
+
+ offload_capa = NICVF_RX_OFFLOAD_CAPA;
+ unsupported_offloads = conf_offloads & ~offload_capa;
+
+ if (unsupported_offloads) {
+ PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
+ "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+ unsupported_offloads, conf_offloads, offload_capa);
+ return -ENOTSUP;
+ }
+
	/* Mempool memory must be contiguous, so it must be one memory segment */
if (mp->nb_mem_chunks != 1) {
PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1363,9 +1389,10 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
nicvf_rx_queue_reset(rxq);
- PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
+ PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
+ " phy=0x%" PRIx64 " offloads=0x%" PRIx64,
nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
- rte_mempool_avail_count(mp), rxq->phys);
+ rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1399,13 +1426,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = 1;
dev_info->max_vfs = pci_dev->max_vfs;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+ dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
+ dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+ dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
dev_info->reta_size = nic->rss_info.rss_size;
dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
@@ -1416,6 +1440,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -1426,6 +1451,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
ETH_TXQ_FLAGS_NOMULTMEMP |
ETH_TXQ_FLAGS_NOVLANOFFL |
ETH_TXQ_FLAGS_NOXSUMSCTP,
+ .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM,
};
}
@@ -1466,6 +1495,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
struct rte_mbuf *mbuf;
uint16_t rx_start, rx_end;
uint16_t tx_start, tx_end;
+ bool vlan_strip;
PMD_INIT_FUNC_TRACE();
@@ -1585,7 +1615,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
/* Configure VLAN Strip */
- nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
+ vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP);
+ nicvf_vlan_hw_strip(nic, vlan_strip);
	/* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
* to the 64bit memory address.
@@ -1713,11 +1745,11 @@ nicvf_dev_start(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * VLAN_TAG_SIZE > buffsz)
dev->data->scattered_rx = 1;
- if (rx_conf->enable_scatter)
+ if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
dev->data->scattered_rx = 1;
/* Setup MTU based on max_rx_pkt_len or default */
- mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
+ mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
dev->data->dev_conf.rxmode.max_rx_pkt_len
- ETHER_HDR_LEN - ETHER_CRC_LEN
: ETHER_MTU;
@@ -1891,6 +1923,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_txmode *txmode = &conf->txmode;
struct nicvf *nic = nicvf_pmd_priv(dev);
uint8_t cqcount;
+ uint64_t conf_rx_offloads, rx_offload_capa;
+ uint64_t conf_tx_offloads, tx_offload_capa;
PMD_INIT_FUNC_TRACE();
@@ -1899,44 +1933,49 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- if (txmode->mq_mode) {
- PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
- return -EINVAL;
- }
+ conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
+ tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
- if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
- rxmode->mq_mode != ETH_MQ_RX_RSS) {
- PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
- return -EINVAL;
+ if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
+ PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+ conf_tx_offloads, tx_offload_capa);
+ return -ENOTSUP;
}
- if (!rxmode->hw_strip_crc) {
- PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
- rxmode->hw_strip_crc = 1;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
+ PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
+ rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
}
- if (rxmode->hw_ip_checksum) {
- PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
- rxmode->hw_ip_checksum = 0;
+ conf_rx_offloads = rxmode->offloads;
+ rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+
+ if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
+ PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+ conf_rx_offloads, rx_offload_capa);
+ return -ENOTSUP;
}
- if (rxmode->split_hdr_size) {
- PMD_INIT_LOG(INFO, "Rxmode does not support split header");
- return -EINVAL;
+ if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+ PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
+ rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
- if (rxmode->hw_vlan_filter) {
- PMD_INIT_LOG(INFO, "VLAN filter not supported");
+ if (txmode->mq_mode) {
+ PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
return -EINVAL;
}
- if (rxmode->hw_vlan_extend) {
- PMD_INIT_LOG(INFO, "VLAN extended not supported");
+ if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
return -EINVAL;
}
- if (rxmode->enable_lro) {
- PMD_INIT_LOG(INFO, "LRO not supported");
+ if (rxmode->split_hdr_size) {
+ PMD_INIT_LOG(INFO, "Rxmode does not support split header");
return -EINVAL;
}
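
This dev_configure rework is the heart of the patch for nicvf: the old
per-field rxmode switches (hw_vlan_strip, hw_ip_checksum, jumbo_frame,
enable_scatter, hw_strip_crc, ...) give way to the single offloads bitmask,
and unsupported requests now fail explicitly with -ENOTSUP instead of being
silently ignored. On the application side, configuration under the new
scheme looks like this minimal sketch (port_id and the queue counts are
assumed to be set up elsewhere):

	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_NONE,
			.max_rx_pkt_len = 9000,
			.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME |
				    DEV_RX_OFFLOAD_VLAN_STRIP |
				    DEV_RX_OFFLOAD_CRC_STRIP, /* nicvf forces this on */
		},
	};

	if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) != 0)
		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);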
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index 71bc3cf2..ea8dccd1 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -1,39 +1,11 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#ifndef __THUNDERX_NICVF_ETHDEV_H__
#define __THUNDERX_NICVF_ETHDEV_H__
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#define THUNDERX_NICVF_PMD_VERSION "2.0"
#define THUNDERX_REG_BYTES 8
@@ -57,6 +29,20 @@
ETH_RSS_GENEVE | \
ETH_RSS_NVGRE)
+#define NICVF_TX_OFFLOAD_CAPA ( \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define NICVF_RX_OFFLOAD_CAPA ( \
+ DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_CRC_STRIP | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_SCATTER)
+
#define NICVF_DEFAULT_RX_FREE_THRESH 224
#define NICVF_DEFAULT_TX_FREE_THRESH 224
#define NICVF_TX_FREE_MPOOL_THRESH 16
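
The new NICVF_RX_OFFLOAD_CAPA/NICVF_TX_OFFLOAD_CAPA macros centralize what
the PMD now advertises through rte_eth_dev_info_get(), both per port and
per queue. Since queue setup rejects anything outside these masks, a
portable application should intersect its requests with the advertised
capabilities first; a short sketch, assuming a valid port_id:

	struct rte_eth_dev_info info;
	uint64_t tx_want = DEV_TX_OFFLOAD_TCP_CKSUM |
			   DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	rte_eth_dev_info_get(port_id, &info);
	if ((tx_want & info.tx_offload_capa) != tx_want)
		tx_want &= info.tx_offload_capa; /* keep only what the PMD can do */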
diff --git a/drivers/net/thunderx/nicvf_logs.h b/drivers/net/thunderx/nicvf_logs.h
index a76d1987..3c455b42 100644
--- a/drivers/net/thunderx/nicvf_logs.h
+++ b/drivers/net/thunderx/nicvf_logs.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#ifndef __THUNDERX_NICVF_LOGS__
@@ -35,49 +7,38 @@
#include <assert.h>
-#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-
-#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, ">>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#define NICVF_RX_ASSERT(x) assert(x)
#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
#define NICVF_RX_ASSERT(x) do { } while (0)
#endif
#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#define NICVF_TX_ASSERT(x) assert(x)
#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
#define NICVF_TX_ASSERT(x) do { } while (0)
#endif
-#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, nicvf_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, ">>")
+
#define PMD_DRV_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+ rte_log(RTE_LOG_ ## level, nicvf_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
-#else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
-#define PMD_DRV_FUNC_TRACE() do { } while (0)
-#endif
-#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX
#define PMD_MBOX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+ rte_log(RTE_LOG_ ## level, nicvf_logtype_mbox, \
+ "%s(): " fmt "\n", __func__, ## args)
#define PMD_MBOX_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
-#else
-#define PMD_MBOX_LOG(level, fmt, args...) do { } while (0)
-#define PMD_MBOX_FUNC_TRACE() do { } while (0)
-#endif
+
+#define PMD_RX_LOG PMD_DRV_LOG
+#define PMD_TX_LOG PMD_DRV_LOG
+
+extern int nicvf_logtype_init;
+extern int nicvf_logtype_driver;
+extern int nicvf_logtype_mbox;
#endif /* __THUNDERX_NICVF_LOGS__ */
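
With this hunk the compile-time RTE_LIBRTE_THUNDERX_NICVF_DEBUG_* switches
disappear: PMD_INIT_LOG, PMD_DRV_LOG and PMD_MBOX_LOG are always compiled
in and routed through the dynamic log types registered in nicvf_ethdev.c,
so verbosity becomes a runtime decision. A sketch using the EAL log API of
this DPDK generation (type names taken from the registration above):

	/* raise every thunderx log type to DEBUG at run time */
	rte_log_set_level_regexp("pmd\\.net\\.thunderx\\..*", RTE_LOG_DEBUG);

	/* or address a single registered type */
	rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_INFO);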
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index e27776e6..72305d9d 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#include <unistd.h>
@@ -41,7 +13,7 @@
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_errno.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_log.h>
#include <rte_mbuf.h>
@@ -252,7 +224,7 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Inform HW to xmit the packets */
nicvf_addr_write(sq->sq_door, used_desc);
- return nb_pkts;
+ return i;
}
static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
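
The one-line "return i" fix above is a genuine correctness change:
nicvf_xmit_pkts_multiseg() used to claim that all nb_pkts packets were
queued even when the loop stopped early on a full send queue. Callers of
rte_eth_tx_burst() rely on the returned count to retransmit the unsent
tail, as in the canonical pattern below (a real application would bound the
retries or drop on persistent back-pressure):

	uint16_t sent = 0;

	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 &pkts[sent], nb_pkts - sent);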
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index a3ccce29..8bdd582e 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -1,40 +1,12 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#ifndef __THUNDERX_NICVF_RXTX_H__
#define __THUNDERX_NICVF_RXTX_H__
#include <rte_byteorder.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index 0f8208ef..d4a83c37 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#ifndef _THUNDERX_NICVF_STRUCT_H
@@ -39,7 +11,7 @@
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_memory.h>
struct nicvf_rbdr {
@@ -67,7 +39,7 @@ struct nicvf_txq {
uint32_t tail;
int32_t xmit_bufs;
uint32_t qlen_mask;
- uint32_t txq_flags;
+ uint64_t offloads;
uint16_t queue_id;
uint16_t tx_free_thresh;
} __rte_cache_aligned;
diff --git a/drivers/net/thunderx/nicvf_svf.c b/drivers/net/thunderx/nicvf_svf.c
index a7cc28a7..bccf2905 100644
--- a/drivers/net/thunderx/nicvf_svf.c
+++ b/drivers/net/thunderx/nicvf_svf.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#include <assert.h>
diff --git a/drivers/net/thunderx/nicvf_svf.h b/drivers/net/thunderx/nicvf_svf.h
index e58ab9d1..db5cb139 100644
--- a/drivers/net/thunderx/nicvf_svf.h
+++ b/drivers/net/thunderx/nicvf_svf.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
*/
#ifndef __THUNDERX_NICVF_SVF_H__
diff --git a/drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map b/drivers/net/thunderx/rte_pmd_thunderx_version.map
index 1901bcb3..1901bcb3 100644
--- a/drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map
+++ b/drivers/net/thunderx/rte_pmd_thunderx_version.map
diff --git a/drivers/net/vdev_netvsc/Makefile b/drivers/net/vdev_netvsc/Makefile
new file mode 100644
index 00000000..7be17137
--- /dev/null
+++ b/drivers/net/vdev_netvsc/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 6WIND S.A.
+# Copyright 2017 Mellanox Technologies, Ltd.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Properties of the generated library.
+LIB = librte_pmd_vdev_netvsc.a
+LIBABIVER := 1
+EXPORT_MAP := rte_pmd_vdev_netvsc_version.map
+
+# Additional compilation flags.
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += -g
+CFLAGS += -Wall -Wextra
+CFLAGS += -D_XOPEN_SOURCE=600
+CFLAGS += -D_BSD_SOURCE
+CFLAGS += -D_DEFAULT_SOURCE
+CFLAGS += $(WERROR_FLAGS)
+
+# Dependencies.
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_ethdev
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_net
+
+# Source files.
+SRCS-$(CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD) += vdev_netvsc.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/vdev_netvsc/rte_pmd_vdev_netvsc_version.map b/drivers/net/vdev_netvsc/rte_pmd_vdev_netvsc_version.map
new file mode 100644
index 00000000..179140fb
--- /dev/null
+++ b/drivers/net/vdev_netvsc/rte_pmd_vdev_netvsc_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+ local: *;
+};
diff --git a/drivers/net/vdev_netvsc/vdev_netvsc.c b/drivers/net/vdev_netvsc/vdev_netvsc.c
new file mode 100644
index 00000000..cbf4d590
--- /dev/null
+++ b/drivers/net/vdev_netvsc/vdev_netvsc.c
@@ -0,0 +1,752 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <netinet/ip.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <rte_alarm.h>
+#include <rte_bus.h>
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_hypervisor.h>
+#include <rte_kvargs.h>
+#include <rte_log.h>
+
+#define VDEV_NETVSC_DRIVER net_vdev_netvsc
+#define VDEV_NETVSC_DRIVER_NAME RTE_STR(VDEV_NETVSC_DRIVER)
+#define VDEV_NETVSC_ARG_IFACE "iface"
+#define VDEV_NETVSC_ARG_MAC "mac"
+#define VDEV_NETVSC_ARG_FORCE "force"
+#define VDEV_NETVSC_ARG_IGNORE "ignore"
+#define VDEV_NETVSC_PROBE_MS 1000
+
+#define NETVSC_CLASS_ID "{f8615163-df3e-46c5-913f-f2d2f965ed0e}"
+#define NETVSC_MAX_ROUTE_LINE_SIZE 300
+
+#define DRV_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, \
+ vdev_netvsc_logtype, \
+ RTE_FMT(VDEV_NETVSC_DRIVER_NAME ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
+/** Driver-specific log messages type. */
+static int vdev_netvsc_logtype;
+
+/** Context structure for a vdev_netvsc instance. */
+struct vdev_netvsc_ctx {
+ LIST_ENTRY(vdev_netvsc_ctx) entry; /**< Next entry in list. */
+ unsigned int id; /**< Unique ID. */
+ char name[64]; /**< Unique name. */
+ char devname[64]; /**< Fail-safe instance name. */
+ char devargs[256]; /**< Fail-safe device arguments. */
+ char if_name[IF_NAMESIZE]; /**< NetVSC netdevice name. */
+ unsigned int if_index; /**< NetVSC netdevice index. */
+ struct ether_addr if_addr; /**< NetVSC MAC address. */
+ int pipe[2]; /**< Fail-safe communication pipe. */
+ char yield[256]; /**< PCI sub-device arguments. */
+};
+
+/** Context list is common to all driver instances. */
+static LIST_HEAD(, vdev_netvsc_ctx) vdev_netvsc_ctx_list =
+ LIST_HEAD_INITIALIZER(vdev_netvsc_ctx_list);
+
+/** Number of entries in context list. */
+static unsigned int vdev_netvsc_ctx_count;
+
+/** Number of driver instances relying on context list. */
+static unsigned int vdev_netvsc_ctx_inst;
+
+/**
+ * Destroy a vdev_netvsc context instance.
+ *
+ * @param ctx
+ * Context to destroy.
+ */
+static void
+vdev_netvsc_ctx_destroy(struct vdev_netvsc_ctx *ctx)
+{
+ if (ctx->pipe[0] != -1)
+ close(ctx->pipe[0]);
+ if (ctx->pipe[1] != -1)
+ close(ctx->pipe[1]);
+ free(ctx);
+}
+
+/**
+ * Iterate over system network interfaces.
+ *
+ * This function runs a given callback function for each netdevice found on
+ * the system.
+ *
+ * @param func
+ * Callback function pointer. List traversal is aborted when this function
+ * returns a nonzero value.
+ * @param ...
+ * Variable parameter list passed as @p va_list to @p func.
+ *
+ * @return
+ * 0 when the entire list is traversed successfully, a negative error code
+ *   in case of failure, or the nonzero value returned by @p func when list
+ * traversal is aborted.
+ */
+static int
+vdev_netvsc_foreach_iface(int (*func)(const struct if_nameindex *iface,
+ const struct ether_addr *eth_addr,
+ va_list ap), ...)
+{
+ struct if_nameindex *iface = if_nameindex();
+ int s = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
+ unsigned int i;
+ int ret = 0;
+
+ if (!iface) {
+ ret = -ENOBUFS;
+ DRV_LOG(ERR, "cannot retrieve system network interfaces");
+ goto error;
+ }
+ if (s == -1) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot open socket: %s", rte_strerror(errno));
+ goto error;
+ }
+ for (i = 0; iface[i].if_name; ++i) {
+ struct ifreq req;
+ struct ether_addr eth_addr;
+ va_list ap;
+
+ strncpy(req.ifr_name, iface[i].if_name, sizeof(req.ifr_name));
+ if (ioctl(s, SIOCGIFHWADDR, &req) == -1) {
+ DRV_LOG(WARNING, "cannot retrieve information about"
+ " interface \"%s\": %s",
+ req.ifr_name, rte_strerror(errno));
+ continue;
+ }
+ if (req.ifr_hwaddr.sa_family != ARPHRD_ETHER) {
+ DRV_LOG(DEBUG, "interface %s is non-ethernet device",
+ req.ifr_name);
+ continue;
+ }
+ memcpy(eth_addr.addr_bytes, req.ifr_hwaddr.sa_data,
+ RTE_DIM(eth_addr.addr_bytes));
+ va_start(ap, func);
+ ret = func(&iface[i], &eth_addr, ap);
+ va_end(ap);
+ if (ret)
+ break;
+ }
+error:
+ if (s != -1)
+ close(s);
+ if (iface)
+ if_freenameindex(iface);
+ return ret;
+}
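+
+/*
+ * Illustrative note: extra arguments are forwarded to @p func as a
+ * va_list in the order given. A callback taking one context pointer is
+ * invoked as
+ *
+ *   vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, ctx);
+ *
+ * and retrieves its argument with
+ *
+ *   struct vdev_netvsc_ctx *ctx = va_arg(ap, struct vdev_netvsc_ctx *);
+ */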
+
+/**
+ * Determine if a network interface is NetVSC.
+ *
+ * @param[in] iface
+ * Pointer to netdevice description structure (name and index).
+ *
+ * @return
+ * A nonzero value when interface is detected as NetVSC. In case of error,
+ * rte_errno is updated and 0 returned.
+ */
+static int
+vdev_netvsc_iface_is_netvsc(const struct if_nameindex *iface)
+{
+ static const char temp[] = "/sys/class/net/%s/device/class_id";
+ char path[sizeof(temp) + IF_NAMESIZE];
+ FILE *f;
+ int ret;
+ int len = 0;
+
+ ret = snprintf(path, sizeof(path), temp, iface->if_name);
+ if (ret == -1 || (size_t)ret >= sizeof(path)) {
+ rte_errno = ENOBUFS;
+ return 0;
+ }
+ f = fopen(path, "r");
+ if (!f) {
+ rte_errno = errno;
+ return 0;
+ }
+ ret = fscanf(f, NETVSC_CLASS_ID "%n", &len);
+ if (ret == EOF)
+ rte_errno = errno;
+ ret = len == (int)strlen(NETVSC_CLASS_ID);
+ fclose(f);
+ return ret;
+}
+
+/**
+ * Determine if a network interface has a route.
+ *
+ * @param[in] name
+ * Network device name.
+ *
+ * @return
+ *   A nonzero value when interface has a route. In case of error,
+ * rte_errno is updated and 0 returned.
+ */
+static int
+vdev_netvsc_has_route(const char *name)
+{
+ FILE *fp;
+ int ret = 0;
+ char route[NETVSC_MAX_ROUTE_LINE_SIZE];
+ char *netdev;
+
+ fp = fopen("/proc/net/route", "r");
+ if (!fp) {
+ rte_errno = errno;
+ return 0;
+ }
+ while (fgets(route, NETVSC_MAX_ROUTE_LINE_SIZE, fp) != NULL) {
+ netdev = strtok(route, "\t");
+ if (strcmp(netdev, name) == 0) {
+ ret = 1;
+ break;
+ }
+ /* Move file pointer to the next line. */
+ while (strchr(route, '\n') == NULL &&
+ fgets(route, NETVSC_MAX_ROUTE_LINE_SIZE, fp) != NULL)
+ ;
+ }
+ fclose(fp);
+ return ret;
+}
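+
+/*
+ * Note: /proc/net/route is a tab-separated table whose first column is
+ * the output interface, e.g. (abridged, values illustrative):
+ *
+ *   Iface   Destination   Gateway   ...
+ *   eth0    00000000      0102A8C0  ...
+ *
+ * The header token "Iface" never matches a netdevice name, so only
+ * actual route entries can match above.
+ */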
+
+/**
+ * Retrieve network interface data from sysfs symbolic link.
+ *
+ * @param[out] buf
+ * Output data buffer.
+ * @param size
+ * Output buffer size.
+ * @param[in] if_name
+ * Netdevice name.
+ * @param[in] relpath
+ * Symbolic link path relative to netdevice sysfs entry.
+ *
+ * @return
+ * 0 on success, a negative error code otherwise.
+ */
+static int
+vdev_netvsc_sysfs_readlink(char *buf, size_t size, const char *if_name,
+ const char *relpath)
+{
+ int ret;
+
+ ret = snprintf(buf, size, "/sys/class/net/%s/%s", if_name, relpath);
+ if (ret == -1 || (size_t)ret >= size)
+ return -ENOBUFS;
+ ret = readlink(buf, buf, size);
+ if (ret == -1)
+ return -errno;
+ if ((size_t)ret >= size - 1)
+ return -ENOBUFS;
+ buf[ret] = '\0';
+ return 0;
+}
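+
+/*
+ * Illustrative example (paths depend on the system): for a PCI-backed
+ * netdevice, relpath "device/subsystem" typically resolves to a target
+ * ending in ".../bus/pci" and relpath "device" to one ending in the bus
+ * address, e.g. ".../0000:00:02.0"; vdev_netvsc_device_probe() below
+ * relies on these basenames.
+ */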
+
+/**
+ * Probe a network interface to associate with vdev_netvsc context.
+ *
+ * This function determines if the network device matches the properties of
+ * the NetVSC interface associated with the vdev_netvsc context and
+ * communicates its bus address to the fail-safe PMD instance if so.
+ *
+ * It is normally used with vdev_netvsc_foreach_iface().
+ *
+ * @param[in] iface
+ * Pointer to netdevice description structure (name and index).
+ * @param[in] eth_addr
+ * MAC address associated with @p iface.
+ * @param ap
+ * Variable arguments list comprising:
+ *
+ * - struct vdev_netvsc_ctx *ctx:
+ * Context to associate network interface with.
+ *
+ * @return
+ * A nonzero value when interface matches, 0 otherwise or in case of
+ * error.
+ */
+static int
+vdev_netvsc_device_probe(const struct if_nameindex *iface,
+ const struct ether_addr *eth_addr,
+ va_list ap)
+{
+ struct vdev_netvsc_ctx *ctx = va_arg(ap, struct vdev_netvsc_ctx *);
+ char buf[RTE_MAX(sizeof(ctx->yield), 256u)];
+ const char *addr;
+ size_t len;
+ int ret;
+
+ /* Skip non-matching or unwanted NetVSC interfaces. */
+ if (ctx->if_index == iface->if_index) {
+ if (!strcmp(ctx->if_name, iface->if_name))
+ return 0;
+ DRV_LOG(DEBUG,
+ "NetVSC interface \"%s\" (index %u) renamed \"%s\"",
+ ctx->if_name, ctx->if_index, iface->if_name);
+ strncpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
+ return 0;
+ }
+ if (vdev_netvsc_iface_is_netvsc(iface))
+ return 0;
+ if (!is_same_ether_addr(eth_addr, &ctx->if_addr))
+ return 0;
+ /* Look for associated PCI device. */
+ ret = vdev_netvsc_sysfs_readlink(buf, sizeof(buf), iface->if_name,
+ "device/subsystem");
+ if (ret)
+ return 0;
+ addr = strrchr(buf, '/');
+ addr = addr ? addr + 1 : buf;
+ if (strcmp(addr, "pci"))
+ return 0;
+ ret = vdev_netvsc_sysfs_readlink(buf, sizeof(buf), iface->if_name,
+ "device");
+ if (ret)
+ return 0;
+ addr = strrchr(buf, '/');
+ addr = addr ? addr + 1 : buf;
+ len = strlen(addr);
+ if (!len)
+ return 0;
+ /* Send PCI device argument to fail-safe PMD instance. */
+ if (strcmp(addr, ctx->yield))
+ DRV_LOG(DEBUG, "associating PCI device \"%s\" with NetVSC"
+ " interface \"%s\" (index %u)", addr, ctx->if_name,
+ ctx->if_index);
+ memmove(buf, addr, len + 1);
+ addr = buf;
+ buf[len] = '\n';
+ ret = write(ctx->pipe[1], addr, len + 1);
+ buf[len] = '\0';
+ if (ret == -1) {
+ if (errno == EINTR || errno == EAGAIN)
+ return 1;
+ DRV_LOG(WARNING, "cannot associate PCI device name \"%s\" with"
+ " interface \"%s\": %s", addr, ctx->if_name,
+ rte_strerror(errno));
+ return 1;
+ }
+ if ((size_t)ret != len + 1) {
+ /*
+ * Attempt to overwrite the previous partial write; there is no
+ * need to recover if that fails.
+ */
+ ret = write(ctx->pipe[1], "\n", 1);
+ (void)ret;
+ return 1;
+ }
+ fsync(ctx->pipe[1]);
+ memcpy(ctx->yield, addr, len + 1);
+ return 1;
+}
+
+/**
+ * Alarm callback that regularly probes system network interfaces.
+ *
+ * This callback runs at a frequency determined by VDEV_NETVSC_PROBE_MS as
+ * long as a vdev_netvsc context instance exists.
+ *
+ * @param arg
+ * Ignored.
+ */
+static void
+vdev_netvsc_alarm(__rte_unused void *arg)
+{
+ struct vdev_netvsc_ctx *ctx;
+ int ret;
+
+ LIST_FOREACH(ctx, &vdev_netvsc_ctx_list, entry) {
+ ret = vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, ctx);
+ if (ret < 0)
+ break;
+ }
+ if (!vdev_netvsc_ctx_count)
+ return;
+ ret = rte_eal_alarm_set(VDEV_NETVSC_PROBE_MS * 1000,
+ vdev_netvsc_alarm, NULL);
+ if (ret < 0) {
+ DRV_LOG(ERR, "unable to reschedule alarm callback: %s",
+ rte_strerror(-ret));
+ }
+}
+
+/**
+ * Probe a NetVSC interface to generate a vdev_netvsc context from.
+ *
+ * This function instantiates vdev_netvsc contexts either for all NetVSC
+ * devices found on the system or only a subset provided as device
+ * arguments.
+ *
+ * It is normally used with vdev_netvsc_foreach_iface().
+ *
+ * @param[in] iface
+ * Pointer to netdevice description structure (name and index).
+ * @param[in] eth_addr
+ * MAC address associated with @p iface.
+ * @param ap
+ * Variable arguments list comprising:
+ *
+ * - const char *name:
+ * Name associated with current driver instance.
+ *
+ * - struct rte_kvargs *kvargs:
+ * Device arguments provided to current driver instance.
+ *
+ * - int force:
+ * Accept specified interface even if not detected as NetVSC.
+ *
+ * - unsigned int specified:
+ * Number of specific netdevices provided as device arguments.
+ *
+ * - unsigned int *matched:
+ * The number of specified netdevices matched by this function.
+ *
+ * @return
+ * A nonzero value when interface matches, 0 otherwise or in case of
+ * error.
+ */
+static int
+vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
+ const struct ether_addr *eth_addr,
+ va_list ap)
+{
+ const char *name = va_arg(ap, const char *);
+ struct rte_kvargs *kvargs = va_arg(ap, struct rte_kvargs *);
+ int force = va_arg(ap, int);
+ unsigned int specified = va_arg(ap, unsigned int);
+ unsigned int *matched = va_arg(ap, unsigned int *);
+ unsigned int i;
+ struct vdev_netvsc_ctx *ctx;
+ int ret;
+
+ /* Probe all interfaces when none are specified. */
+ if (specified) {
+ for (i = 0; i != kvargs->count; ++i) {
+ const struct rte_kvargs_pair *pair = &kvargs->pairs[i];
+
+ if (!strcmp(pair->key, VDEV_NETVSC_ARG_IFACE)) {
+ if (!strcmp(pair->value, iface->if_name))
+ break;
+ } else if (!strcmp(pair->key, VDEV_NETVSC_ARG_MAC)) {
+ struct ether_addr tmp;
+
+ if (sscanf(pair->value,
+ "%" SCNx8 ":%" SCNx8 ":%" SCNx8 ":"
+ "%" SCNx8 ":%" SCNx8 ":%" SCNx8,
+ &tmp.addr_bytes[0],
+ &tmp.addr_bytes[1],
+ &tmp.addr_bytes[2],
+ &tmp.addr_bytes[3],
+ &tmp.addr_bytes[4],
+ &tmp.addr_bytes[5]) != 6) {
+ DRV_LOG(ERR,
+ "invalid MAC address format"
+ " \"%s\"",
+ pair->value);
+ return -EINVAL;
+ }
+ if (is_same_ether_addr(eth_addr, &tmp))
+ break;
+ }
+ }
+ if (i == kvargs->count)
+ return 0;
+ ++(*matched);
+ }
+ /* Weed out interfaces already handled. */
+ LIST_FOREACH(ctx, &vdev_netvsc_ctx_list, entry)
+ if (ctx->if_index == iface->if_index)
+ break;
+ if (ctx) {
+ if (!specified)
+ return 0;
+ DRV_LOG(WARNING,
+ "interface \"%s\" (index %u) is already handled,"
+ " skipping",
+ iface->if_name, iface->if_index);
+ return 0;
+ }
+ if (!vdev_netvsc_iface_is_netvsc(iface)) {
+ if (!specified || !force)
+ return 0;
+ DRV_LOG(WARNING,
+ "using non-NetVSC interface \"%s\" (index %u)",
+ iface->if_name, iface->if_index);
+ }
+ /* Routed NetVSC should not be probed. */
+ if (vdev_netvsc_has_route(iface->if_name)) {
+ if (!specified || !force)
+ return 0;
+ DRV_LOG(WARNING, "using routed NetVSC interface \"%s\""
+ " (index %u)", iface->if_name, iface->if_index);
+ }
+ /* Create interface context. */
+ ctx = calloc(1, sizeof(*ctx));
+ if (!ctx) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot allocate context for interface \"%s\": %s",
+ iface->if_name, rte_strerror(errno));
+ goto error;
+ }
+ ctx->id = vdev_netvsc_ctx_count;
+ strncpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
+ ctx->if_index = iface->if_index;
+ ctx->if_addr = *eth_addr;
+ ctx->pipe[0] = -1;
+ ctx->pipe[1] = -1;
+ ctx->yield[0] = '\0';
+ if (pipe(ctx->pipe) == -1) {
+ ret = -errno;
+ DRV_LOG(ERR,
+ "cannot allocate control pipe for interface \"%s\": %s",
+ ctx->if_name, rte_strerror(errno));
+ goto error;
+ }
+ for (i = 0; i != RTE_DIM(ctx->pipe); ++i) {
+ int flf = fcntl(ctx->pipe[i], F_GETFL);
+
+ if (flf != -1 &&
+ fcntl(ctx->pipe[i], F_SETFL, flf | O_NONBLOCK) != -1)
+ continue;
+ ret = -errno;
+ DRV_LOG(ERR, "cannot toggle non-blocking flag on control file"
+ " descriptor #%u (%d): %s", i, ctx->pipe[i],
+ rte_strerror(errno));
+ goto error;
+ }
+ /* Generate virtual device name and arguments. */
+ i = 0;
+ ret = snprintf(ctx->name, sizeof(ctx->name), "%s_id%u",
+ name, ctx->id);
+ if (ret == -1 || (size_t)ret >= sizeof(ctx->name))
+ ++i;
+ ret = snprintf(ctx->devname, sizeof(ctx->devname), "net_failsafe_%s",
+ ctx->name);
+ if (ret == -1 || (size_t)ret >= sizeof(ctx->devname))
+ ++i;
+ ret = snprintf(ctx->devargs, sizeof(ctx->devargs),
+ "fd(%d),dev(net_tap_%s,remote=%s)",
+ ctx->pipe[0], ctx->name, ctx->if_name);
+ if (ret == -1 || (size_t)ret >= sizeof(ctx->devargs))
+ ++i;
+ if (i) {
+ ret = -ENOBUFS;
+ DRV_LOG(ERR, "generated virtual device name or argument list"
+ " too long for interface \"%s\"", ctx->if_name);
+ goto error;
+ }
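+ /*
+ * Illustrative only (the pipe fd and interface name are examples):
+ * a driver instance named "net_vdev_netvsc" handling NetVSC
+ * netdevice "eth1" would generate something like:
+ *
+ *   devname: "net_failsafe_net_vdev_netvsc_id0"
+ *   devargs: "fd(7),dev(net_tap_net_vdev_netvsc_id0,remote=eth1)"
+ */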
+ /* Request virtual device generation. */
+ DRV_LOG(DEBUG, "generating virtual device \"%s\" with arguments \"%s\"",
+ ctx->devname, ctx->devargs);
+ vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, ctx);
+ ret = rte_eal_hotplug_add("vdev", ctx->devname, ctx->devargs);
+ if (ret)
+ goto error;
+ LIST_INSERT_HEAD(&vdev_netvsc_ctx_list, ctx, entry);
+ ++vdev_netvsc_ctx_count;
+ DRV_LOG(DEBUG, "added NetVSC interface \"%s\" to context list",
+ ctx->if_name);
+ return 0;
+error:
+ if (ctx)
+ vdev_netvsc_ctx_destroy(ctx);
+ return ret;
+}
+
+/**
+ * Probe NetVSC interfaces.
+ *
+ * This function probes system netdevices according to the specified device
+ * arguments and starts a periodic alarm callback to notify the resulting
+ * fail-safe PMD instances of their sub-devices' whereabouts.
+ *
+ * @param dev
+ * Virtual device context for driver instance.
+ *
+ * @return
+ * Always 0, even in case of errors.
+ */
+static int
+vdev_netvsc_vdev_probe(struct rte_vdev_device *dev)
+{
+ static const char *const vdev_netvsc_arg[] = {
+ VDEV_NETVSC_ARG_IFACE,
+ VDEV_NETVSC_ARG_MAC,
+ VDEV_NETVSC_ARG_FORCE,
+ VDEV_NETVSC_ARG_IGNORE,
+ NULL,
+ };
+ const char *name = rte_vdev_device_name(dev);
+ const char *args = rte_vdev_device_args(dev);
+ struct rte_kvargs *kvargs = rte_kvargs_parse(args ? args : "",
+ vdev_netvsc_arg);
+ unsigned int specified = 0;
+ unsigned int matched = 0;
+ int force = 0;
+ int ignore = 0;
+ unsigned int i;
+ int ret;
+
+ DRV_LOG(DEBUG, "invoked as \"%s\", using arguments \"%s\"", name, args);
+ if (!kvargs) {
+ DRV_LOG(ERR, "cannot parse arguments list");
+ goto error;
+ }
+ for (i = 0; i != kvargs->count; ++i) {
+ const struct rte_kvargs_pair *pair = &kvargs->pairs[i];
+
+ if (!strcmp(pair->key, VDEV_NETVSC_ARG_FORCE))
+ force = !!atoi(pair->value);
+ else if (!strcmp(pair->key, VDEV_NETVSC_ARG_IGNORE))
+ ignore = !!atoi(pair->value);
+ else if (!strcmp(pair->key, VDEV_NETVSC_ARG_IFACE) ||
+ !strcmp(pair->key, VDEV_NETVSC_ARG_MAC))
+ ++specified;
+ }
+ if (ignore) {
+ if (kvargs)
+ rte_kvargs_free(kvargs);
+ return 0;
+ }
+ rte_eal_alarm_cancel(vdev_netvsc_alarm, NULL);
+ /* Gather interfaces. */
+ ret = vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, name, kvargs,
+ force, specified, &matched);
+ if (ret < 0)
+ goto error;
+ if (matched < specified)
+ DRV_LOG(WARNING,
+ "some of the specified parameters did not match"
+ " recognized network interfaces");
+ ret = rte_eal_alarm_set(VDEV_NETVSC_PROBE_MS * 1000,
+ vdev_netvsc_alarm, NULL);
+ if (ret < 0) {
+ DRV_LOG(ERR, "unable to schedule alarm callback: %s",
+ rte_strerror(-ret));
+ goto error;
+ }
+error:
+ if (kvargs)
+ rte_kvargs_free(kvargs);
+ ++vdev_netvsc_ctx_inst;
+ return 0;
+}
+
+/**
+ * Remove driver instance.
+ *
+ * The alarm callback and underlying vdev_netvsc context instances are only
+ * destroyed after the last PMD instance is removed.
+ *
+ * @param dev
+ * Virtual device context for driver instance.
+ *
+ * @return
+ * Always 0.
+ */
+static int
+vdev_netvsc_vdev_remove(__rte_unused struct rte_vdev_device *dev)
+{
+ if (--vdev_netvsc_ctx_inst)
+ return 0;
+ rte_eal_alarm_cancel(vdev_netvsc_alarm, NULL);
+ while (!LIST_EMPTY(&vdev_netvsc_ctx_list)) {
+ struct vdev_netvsc_ctx *ctx = LIST_FIRST(&vdev_netvsc_ctx_list);
+
+ LIST_REMOVE(ctx, entry);
+ --vdev_netvsc_ctx_count;
+ vdev_netvsc_ctx_destroy(ctx);
+ }
+ return 0;
+}
+
+/** Virtual device descriptor. */
+static struct rte_vdev_driver vdev_netvsc_vdev = {
+ .probe = vdev_netvsc_vdev_probe,
+ .remove = vdev_netvsc_vdev_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(VDEV_NETVSC_DRIVER, vdev_netvsc_vdev);
+RTE_PMD_REGISTER_ALIAS(VDEV_NETVSC_DRIVER, eth_vdev_netvsc);
+RTE_PMD_REGISTER_PARAM_STRING(net_vdev_netvsc,
+ VDEV_NETVSC_ARG_IFACE "=<string> "
+ VDEV_NETVSC_ARG_MAC "=<string> "
+ VDEV_NETVSC_ARG_FORCE "=<int> "
+ VDEV_NETVSC_ARG_IGNORE "=<int>");
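+
+/*
+ * Illustrative usage (interface name and MAC address are examples):
+ * besides automatic probing on Hyper-V, the driver can be instantiated
+ * explicitly from the EAL command line, e.g.:
+ *
+ *   --vdev=net_vdev_netvsc,iface=eth1
+ *   --vdev=net_vdev_netvsc,mac=00:15:5d:01:02:03,force=1
+ */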
+
+/** Initialize driver log type. */
+RTE_INIT(vdev_netvsc_init_log)
+{
+ vdev_netvsc_logtype = rte_log_register("pmd.vdev_netvsc");
+ if (vdev_netvsc_logtype >= 0)
+ rte_log_set_level(vdev_netvsc_logtype, RTE_LOG_NOTICE);
+}
+
+/** Compare function for vdev find device operation. */
+static int
+vdev_netvsc_cmp_rte_device(const struct rte_device *dev1,
+ __rte_unused const void *_dev2)
+{
+ return strcmp(dev1->devargs->name, VDEV_NETVSC_DRIVER_NAME);
+}
+
+/**
+ * A callback invoked by the vdev bus scan function to ensure this driver
+ * is probed automatically on Hyper-V systems unless it already exists in
+ * the devargs list.
+ */
+static void
+vdev_netvsc_scan_callback(__rte_unused void *arg)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ struct rte_bus *vbus = rte_bus_find_by_name("vdev");
+
+ TAILQ_FOREACH(devargs, &devargs_list, next)
+ if (!strcmp(devargs->name, VDEV_NETVSC_DRIVER_NAME))
+ return;
+ dev = (struct rte_vdev_device *)vbus->find_device(NULL,
+ vdev_netvsc_cmp_rte_device, VDEV_NETVSC_DRIVER_NAME);
+ if (dev)
+ return;
+ if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, VDEV_NETVSC_DRIVER_NAME))
+ DRV_LOG(ERR, "unable to add netvsc devargs.");
+}
+
+/** Initialize the custom scan. */
+RTE_INIT(vdev_netvsc_custom_scan_add)
+{
+ if (rte_hypervisor_get() == RTE_HYPERVISOR_HYPERV)
+ rte_vdev_add_custom_scan(vdev_netvsc_scan_callback, NULL);
+}
diff --git a/drivers/net/vhost/Makefile b/drivers/net/vhost/Makefile
index c411745b..83b5a8b8 100644
--- a/drivers/net/vhost/Makefile
+++ b/drivers/net/vhost/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright (c) 2010-2016 Intel Corporation.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2016 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 2536ee4a..3aae01c3 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -35,7 +35,7 @@
#include <stdbool.h>
#include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -607,10 +607,9 @@ new_device(int vid)
rte_atomic32_set(&internal->dev_attached, 1);
update_queuing_status(eth_dev);
- RTE_LOG(INFO, PMD, "New connection established\n");
+ RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
return 0;
}
@@ -662,10 +661,9 @@ destroy_device(int vid)
state->max_vring = 0;
rte_spinlock_unlock(&state->lock);
- RTE_LOG(INFO, PMD, "Connection closed\n");
+ RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int
@@ -694,8 +692,7 @@ vring_state_changed(int vid, uint16_t vring, int enable)
RTE_LOG(INFO, PMD, "vring%u is %s\n",
vring, enable ? "enabled" : "disabled");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE,
- NULL, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
return 0;
}
diff --git a/drivers/net/virtio/Makefile b/drivers/net/virtio/Makefile
index f2b5d1c3..6c2c9967 100644
--- a/drivers/net/virtio/Makefile
+++ b/drivers/net/virtio/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
@@ -36,6 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_virtio.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index e0328f61..884f74ad 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <stdint.h>
@@ -37,7 +8,7 @@
#include <errno.h>
#include <unistd.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -48,6 +19,8 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_arp.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_cpuflags.h>
@@ -55,6 +28,7 @@
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
+#include <rte_cycles.h>
#include "virtio_ethdev.h"
#include "virtio_pci.h"
@@ -106,6 +80,12 @@ static int virtio_dev_queue_stats_mapping_set(
uint8_t stat_idx,
uint8_t is_rx);
+int virtio_logtype_init;
+int virtio_logtype_driver;
+
+static void virtio_notify_peers(struct rte_eth_dev *dev);
+static void virtio_ack_link_announce(struct rte_eth_dev *dev);
+
/*
* The set of PCI devices this driver supports
*/
@@ -177,6 +157,8 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
PMD_INIT_LOG(ERR, "Control queue is not supported.");
return -1;
}
+
+ rte_spinlock_lock(&cvq->lock);
vq = cvq->vq;
head = vq->vq_desc_head_idx;
@@ -184,8 +166,10 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
"vq->hw->cvq = %p vq = %p",
vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
- if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
+ if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
+ rte_spinlock_unlock(&cvq->lock);
return -1;
+ }
memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
sizeof(struct virtio_pmd_ctrl));
@@ -261,6 +245,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
result = cvq->virtio_net_hdr_mz->addr;
+ rte_spinlock_unlock(&cvq->lock);
return result->status;
}
@@ -294,17 +279,6 @@ virtio_dev_queue_release(void *queue __rte_unused)
/* do nothing */
}
-static int
-virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
-{
- if (vtpci_queue_idx == hw->max_queue_pairs * 2)
- return VTNET_CQ;
- else if (vtpci_queue_idx % 2 == 0)
- return VTNET_RQ;
- else
- return VTNET_TQ;
-}
-
static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
{
@@ -893,7 +867,7 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
/* Note: limit checked in rte_eth_xstats_names() */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct virtqueue *rxvq = dev->data->rx_queues[i];
+ struct virtnet_rx *rxvq = dev->data->rx_queues[i];
if (rxvq == NULL)
continue;
for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
@@ -906,7 +880,7 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct virtqueue *txvq = dev->data->tx_queues[i];
+ struct virtnet_tx *txvq = dev->data->tx_queues[i];
if (txvq == NULL)
continue;
for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
@@ -1244,9 +1218,97 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
return 0;
}
+int
+virtio_dev_pause(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ rte_spinlock_lock(&hw->state_lock);
+
+ if (hw->started == 0) {
+ /* Device is already stopped. */
+ rte_spinlock_unlock(&hw->state_lock);
+ return -1;
+ }
+ hw->started = 0;
+ /*
+ * Prevent the worker threads from touching queues to avoid contention;
+ * 1 ms should be enough for the ongoing Tx function to finish.
+ */
+ rte_delay_ms(1);
+ return 0;
+}
+
+/*
+ * Recover hw state to let the worker threads continue.
+ */
+void
+virtio_dev_resume(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ hw->started = 1;
+ rte_spinlock_unlock(&hw->state_lock);
+}
+
+/*
+ * Should be called only after device is paused.
+ */
+int
+virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
+ int nb_pkts)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtnet_tx *txvq = dev->data->tx_queues[0];
+ int ret;
+
+ hw->inject_pkts = tx_pkts;
+ ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
+ hw->inject_pkts = NULL;
+
+ return ret;
+}
+
+static void
+virtio_notify_peers(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtnet_rx *rxvq = dev->data->rx_queues[0];
+ struct rte_mbuf *rarp_mbuf;
+
+ rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
+ (struct ether_addr *)hw->mac_addr);
+ if (rarp_mbuf == NULL) {
+ PMD_DRV_LOG(ERR, "failed to make RARP packet.");
+ return;
+ }
+
+ /* If virtio port just stopped, no need to send RARP */
+ if (virtio_dev_pause(dev) < 0) {
+ rte_pktmbuf_free(rarp_mbuf);
+ return;
+ }
+
+ virtio_inject_pkts(dev, &rarp_mbuf, 1);
+ virtio_dev_resume(dev);
+}
+
+static void
+virtio_ack_link_announce(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtio_pmd_ctrl ctrl;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;
+
+ virtio_send_command(hw->cvq, &ctrl, NULL, 0);
+}
+
/*
- * Process Virtio Config changed interrupt and call the callback
- * if link state changed.
+ * Process the virtio config changed interrupt. Call the callback
+ * if the link state changed, and generate a gratuitous RARP packet
+ * if the status indicates an ANNOUNCE.
*/
void
virtio_interrupt_handler(void *param)
@@ -1266,9 +1328,13 @@ virtio_interrupt_handler(void *param)
if (virtio_dev_link_update(dev, 0) == 0)
_rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
+ if (isr & VIRTIO_NET_S_ANNOUNCE) {
+ virtio_notify_peers(dev);
+ virtio_ack_link_announce(dev);
+ }
}
/* set rx and tx handlers according to what is supported */
@@ -1405,6 +1471,11 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
/* Reset the device although not necessary at startup */
vtpci_reset(hw);
+ if (hw->vqs) {
+ virtio_dev_free_mbufs(eth_dev);
+ virtio_free_queues(hw);
+ }
+
/* Tell the host we've noticed this device. */
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
@@ -1508,6 +1579,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
} else {
PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
hw->max_queue_pairs = 1;
+ hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
+ VLAN_TAG_LEN - hw->vtnet_hdr_size;
}
ret = virtio_alloc_queues(eth_dev);
@@ -1754,7 +1827,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
if (rxmode->enable_lro &&
(!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4))) {
+ !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
PMD_DRV_LOG(ERR,
"Large Receive Offload not available on this host");
return -ENOTSUP;
@@ -1781,10 +1854,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return -EBUSY;
}
+ rte_spinlock_init(&hw->state_lock);
+
hw->use_simple_rx = 1;
hw->use_simple_tx = 1;
-#if defined RTE_ARCH_ARM64 || defined CONFIG_RTE_ARCH_ARM
+#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
hw->use_simple_rx = 0;
hw->use_simple_tx = 0;
@@ -1860,7 +1935,7 @@ virtio_dev_start(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxvq = dev->data->rx_queues[i];
/* Flush the old packets */
- virtqueue_flush(rxvq->vq);
+ virtqueue_rxvq_flush(rxvq->vq);
virtqueue_notify(rxvq->vq);
}
@@ -1892,47 +1967,47 @@ virtio_dev_start(struct rte_eth_dev *dev)
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
{
+ struct virtio_hw *hw = dev->data->dev_private;
+ uint16_t nr_vq = virtio_get_nr_vq(hw);
+ const char *type __rte_unused;
+ unsigned int i, mbuf_num = 0;
+ struct virtqueue *vq;
struct rte_mbuf *buf;
- int i, mbuf_num = 0;
-
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct virtnet_rx *rxvq = dev->data->rx_queues[i];
-
- PMD_INIT_LOG(DEBUG,
- "Before freeing rxq[%d] used and unused buf", i);
- VIRTQUEUE_DUMP(rxvq->vq);
+ int queue_type;
- PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
- while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
- rte_pktmbuf_free(buf);
- mbuf_num++;
- }
+ if (hw->vqs == NULL)
+ return;
- PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
- PMD_INIT_LOG(DEBUG,
- "After freeing rxq[%d] used and unused buf", i);
- VIRTQUEUE_DUMP(rxvq->vq);
- }
+ for (i = 0; i < nr_vq; i++) {
+ vq = hw->vqs[i];
+ if (!vq)
+ continue;
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct virtnet_tx *txvq = dev->data->tx_queues[i];
+ queue_type = virtio_get_queue_type(hw, i);
+ if (queue_type == VTNET_RQ)
+ type = "rxq";
+ else if (queue_type == VTNET_TQ)
+ type = "txq";
+ else
+ continue;
PMD_INIT_LOG(DEBUG,
- "Before freeing txq[%d] used and unused bufs",
- i);
- VIRTQUEUE_DUMP(txvq->vq);
+ "Before freeing %s[%d] used and unused buf",
+ type, i);
+ VIRTQUEUE_DUMP(vq);
- mbuf_num = 0;
- while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
+ while ((buf = virtqueue_detach_unused(vq)) != NULL) {
rte_pktmbuf_free(buf);
mbuf_num++;
}
- PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
PMD_INIT_LOG(DEBUG,
- "After freeing txq[%d] used and unused buf", i);
- VIRTQUEUE_DUMP(txvq->vq);
+ "After freeing %s[%d] used and unused buf",
+ type, i);
+ VIRTQUEUE_DUMP(vq);
}
+
+ PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
}
/*
@@ -1947,12 +2022,14 @@ virtio_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "stop");
+ rte_spinlock_lock(&hw->state_lock);
if (intr_conf->lsc || intr_conf->rxq)
virtio_intr_disable(dev);
hw->started = 0;
memset(&link, 0, sizeof(link));
virtio_dev_atomic_write_link_status(dev, &link);
+ rte_spinlock_unlock(&hw->state_lock);
}
static int
@@ -1965,7 +2042,7 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
virtio_dev_atomic_read_link_status(dev, &link);
old = link;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = SPEED_10G;
+ link.link_speed = ETH_SPEED_NUM_10G;
if (hw->started == 0) {
link.link_status = ETH_LINK_DOWN;
@@ -2072,3 +2149,15 @@ __rte_unused uint8_t is_rx)
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(virtio_init_log);
+static void
+virtio_init_log(void)
+{
+ virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
+ if (virtio_logtype_init >= 0)
+ rte_log_set_level(virtio_logtype_init, RTE_LOG_NOTICE);
+ virtio_logtype_driver = rte_log_register("pmd.net.virtio.driver");
+ if (virtio_logtype_driver >= 0)
+ rte_log_set_level(virtio_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 2039bc54..4539d2e4 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef _VIRTIO_ETHDEV_H_
@@ -38,11 +9,6 @@
#include "virtio_pci.h"
-#define SPEED_10 10
-#define SPEED_100 100
-#define SPEED_1000 1000
-#define SPEED_10G 10000
-
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
@@ -67,6 +33,7 @@
1u << VIRTIO_NET_F_HOST_TSO6 | \
1u << VIRTIO_NET_F_MRG_RXBUF | \
1u << VIRTIO_NET_F_MTU | \
+ 1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE | \
1u << VIRTIO_RING_F_INDIRECT_DESC | \
1ULL << VIRTIO_F_VERSION_1 | \
1ULL << VIRTIO_F_IOMMU_PLATFORM)
@@ -121,4 +88,9 @@ int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);
void virtio_interrupt_handler(void *param);
+int virtio_dev_pause(struct rte_eth_dev *dev);
+void virtio_dev_resume(struct rte_eth_dev *dev);
+int virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
+ int nb_pkts);
+
#endif /* _VIRTIO_ETHDEV_H_ */
diff --git a/drivers/net/virtio/virtio_logs.h b/drivers/net/virtio/virtio_logs.h
index 90a79eaa..9b1b1def 100644
--- a/drivers/net/virtio/virtio_logs.h
+++ b/drivers/net/virtio/virtio_logs.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _VIRTIO_LOGS_H_
@@ -36,14 +7,12 @@
#include <rte_log.h>
-#ifdef RTE_LIBRTE_VIRTIO_DEBUG_INIT
+extern int virtio_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+ rte_log(RTE_LOG_ ## level, virtio_logtype_init, \
+ "%s(): " fmt "\n", __func__, ##args)
+
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
-#define PMD_INIT_FUNC_TRACE() do { } while(0)
-#endif
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -59,12 +28,9 @@
#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
#endif
-
-#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER
+extern int virtio_logtype_driver;
#define PMD_DRV_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
-#endif
+ rte_log(RTE_LOG_ ## level, virtio_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
#endif /* _VIRTIO_LOGS_H_ */
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index 9574498f..6bd22e54 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <stdint.h>
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 3c5ce66c..a28ba833 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _VIRTIO_PCI_H_
@@ -38,7 +9,7 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
struct virtqueue;
struct virtnet_ctl;
@@ -270,6 +241,13 @@ struct virtio_hw {
struct virtio_pci_common_cfg *common_cfg;
struct virtio_net_config *dev_cfg;
void *virtio_user_dev;
+ /*
+ * The application management thread and the virtio interrupt handler
+ * thread can both change device state; this lock is meant to avoid
+ * such contention.
+ */
+ rte_spinlock_t state_lock;
+ struct rte_mbuf **inject_pkts;
struct virtqueue **vqs;
};
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index fcecc161..9e3c2a01 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _VIRTIO_RING_H_
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 390c137c..8dbf2a30 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <stdint.h>
@@ -44,7 +15,7 @@
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
@@ -59,6 +30,7 @@
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
+#include "virtio_rxtx_simple.h"
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -407,6 +379,7 @@ virtio_dev_cq_start(struct rte_eth_dev *dev)
struct virtio_hw *hw = dev->data->dev_private;
if (hw->cvq && hw->cvq->vq) {
+ rte_spinlock_init(&hw->cvq->lock);
VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
}
}
@@ -465,6 +438,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
vq->vq_ring.desc[desc_idx].flags =
VRING_DESC_F_WRITE;
}
+
+ virtio_rxq_vec_setup(rxvq);
}
memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
@@ -474,30 +449,31 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
&rxvq->fake_mbuf;
}
- while (!virtqueue_full(vq)) {
- m = rte_mbuf_raw_alloc(rxvq->mpool);
- if (m == NULL)
- break;
+ if (hw->use_simple_rx) {
+ while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ virtio_rxq_rearm_vec(rxvq);
+ nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ }
+ } else {
+ while (!virtqueue_full(vq)) {
+ m = rte_mbuf_raw_alloc(rxvq->mpool);
+ if (m == NULL)
+ break;
- /* Enqueue allocated buffers */
- if (hw->use_simple_rx)
- error = virtqueue_enqueue_recv_refill_simple(vq, m);
- else
+ /* Enqueue allocated buffers */
error = virtqueue_enqueue_recv_refill(vq, m);
-
- if (error) {
- rte_pktmbuf_free(m);
- break;
+ if (error) {
+ rte_pktmbuf_free(m);
+ break;
+ }
+ nbufs++;
}
- nbufs++;
- }
- vq_update_avail_idx(vq);
+ vq_update_avail_idx(vq);
+ }
PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
- virtio_rxq_vec_setup(rxvq);
-
VIRTQUEUE_DUMP(vq);
return 0;
@@ -1016,7 +992,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
uint16_t nb_used, nb_tx = 0;
int error;
- if (unlikely(hw->started == 0))
+ if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
if (unlikely(nb_pkts < 1))
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 54f1e849..685cc4f8 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#ifndef _VIRTIO_RXTX_H_
@@ -84,11 +55,9 @@ struct virtnet_ctl {
rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
uint16_t port_id; /**< Device port identifier. */
const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
+ rte_spinlock_t lock; /**< spinlock for control queue. */
};
int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
-int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
- struct rte_mbuf *m);
-
#endif /* _VIRTIO_RXTX_H_ */
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index b5bc1c49..51520758 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <stdint.h>
@@ -44,7 +15,7 @@
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
@@ -56,34 +27,6 @@
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
-int __attribute__((cold))
-virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
- struct rte_mbuf *cookie)
-{
- struct vq_desc_extra *dxp;
- struct vring_desc *start_dp;
- uint16_t desc_idx;
-
- cookie->port = vq->rxq.port_id;
-
- desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
- dxp = &vq->vq_descx[desc_idx];
- dxp->cookie = (void *)cookie;
- vq->sw_ring[desc_idx] = cookie;
-
- start_dp = vq->vq_ring.desc;
- start_dp[desc_idx].addr =
- VIRTIO_MBUF_ADDR(cookie, vq) +
- RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
- start_dp[desc_idx].len = cookie->buf_len -
- RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
-
- vq->vq_free_cnt--;
- vq->vq_avail_idx++;
-
- return 0;
-}
-
uint16_t
virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -99,14 +42,14 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
uint16_t nb_tx = 0;
- if (unlikely(hw->started == 0))
+ if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
nb_used = VIRTQUEUE_NUSED(vq);
rte_compiler_barrier();
if (nb_used >= VIRTIO_TX_FREE_THRESH)
- virtio_xmit_cleanup(vq);
+ virtio_xmit_cleanup_simple(vq);
nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index f531c542..303904d6 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#ifndef _VIRTIO_RXTX_SIMPLE_H_
@@ -89,7 +60,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
#define VIRTIO_TX_FREE_NR 32
/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
static inline void
-virtio_xmit_cleanup(struct virtqueue *vq)
+virtio_xmit_cleanup_simple(struct virtqueue *vq)
{
uint16_t i, desc_idx;
uint32_t nb_free = 0;
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index b8b93551..d6207d7b 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -1,34 +1,6 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2016
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
#include <stdint.h>
#include <stdio.h>
@@ -40,7 +12,7 @@
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -52,9 +24,7 @@
#include "virtio_rxtx_simple.h"
-#define RTE_VIRTIO_VPMD_RX_BURST 32
#define RTE_VIRTIO_DESC_PER_LOOP 8
-#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
 /* virtio vPMD receive routine, only accepts (nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP)
*
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
index 94f65143..d768d075 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <stdint.h>
@@ -43,7 +14,7 @@
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -54,9 +25,7 @@
#include "virtio_rxtx_simple.h"
-#define RTE_VIRTIO_VPMD_RX_BURST 32
#define RTE_VIRTIO_DESC_PER_LOOP 8
-#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
 /* virtio vPMD receive routine, only accepts (nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP)
*
diff --git a/drivers/net/virtio/virtio_user/vhost.h b/drivers/net/virtio/virtio_user/vhost.h
index 5c983bd4..668cc99f 100644
--- a/drivers/net/virtio/virtio_user/vhost.h
+++ b/drivers/net/virtio/virtio_user/vhost.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#ifndef _VHOST_NET_USER_H
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
index 68d28b13..8d0a1ab2 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
*/
#include <sys/types.h>
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
index 689a5cff..1a47a348 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
*/
#include <unistd.h>
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
index eae340cc..7d52e6b7 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
*/
#include <sys/ioctl.h>
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index 97bd8326..91c6449b 100644
--- a/drivers/net/virtio/virtio_user/vhost_user.c
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <sys/socket.h>
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 906d7a2b..f90fee9e 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <stdint.h>
@@ -128,6 +99,9 @@ virtio_user_start_device(struct virtio_user_dev *dev)
uint64_t features;
int ret;
+ /* Do not check return as already done in init, or reset in stop */
+ dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL);
+
/* Step 0: tell vhost to create queues */
if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
goto error;
@@ -171,6 +145,11 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
for (i = 0; i < dev->max_queue_pairs; ++i)
dev->ops->enable_qp(dev, i, 0);
+ if (dev->ops->send_request(dev, VHOST_USER_RESET_OWNER, NULL) < 0) {
+		PMD_DRV_LOG(INFO, "Failed to reset the device");
+ return -1;
+ }
+
return 0;
}
@@ -272,6 +251,7 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
/* For virtio vdev, no need to read counter for clean */
eth_dev->intr_handle->efd_counter_size = 0;
+ eth_dev->intr_handle->fd = -1;
if (dev->vhostfd >= 0)
eth_dev->intr_handle->fd = dev->vhostfd;
@@ -356,6 +336,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
PMD_INIT_LOG(ERR, "backend set up fails");
return -1;
}
+
if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
return -1;
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 8361b6bd..64467b4f 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#ifndef _VIRTIO_USER_DEV_H
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 7be57ce6..26364900 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <stdint.h>
@@ -109,7 +80,11 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
} else {
dev->status |= VIRTIO_NET_S_LINK_UP;
}
- fcntl(dev->vhostfd, F_SETFL, flags & (~O_NONBLOCK));
+ if (fcntl(dev->vhostfd, F_SETFL,
+ flags & ~O_NONBLOCK) == -1) {
+ PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
+ return;
+ }
}
*(uint16_t *)dst = dev->status;
}
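Note: the fix above stops ignoring the fcntl() return value when clearing O_NONBLOCK. A minimal standalone sketch of the checked get-modify-set pattern (plain POSIX, not driver code):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = STDIN_FILENO;
	int flags = fcntl(fd, F_GETFL);

	if (flags == -1) {
		perror("F_GETFL");
		return 1;
	}
	/* clear O_NONBLOCK, keeping all other status flags intact */
	if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) {
		perror("F_SETFL");
		return 1;
	}
	return 0;
}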
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index c3a536f8..a7d0a9cb 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <stdint.h>
@@ -37,6 +8,7 @@
#include "virtqueue.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
+#include "virtio_rxtx_simple.h"
/*
* Two types of mbuf to be cleaned:
@@ -44,26 +16,50 @@
 * 2) mbuf that hasn't been consumed by backend.
*/
struct rte_mbuf *
-virtqueue_detatch_unused(struct virtqueue *vq)
+virtqueue_detach_unused(struct virtqueue *vq)
{
struct rte_mbuf *cookie;
- int idx;
+ struct virtio_hw *hw;
+ uint16_t start, end;
+ int type, idx;
- if (vq != NULL)
- for (idx = 0; idx < vq->vq_nentries; idx++) {
+ if (vq == NULL)
+ return NULL;
+
+ hw = vq->hw;
+ type = virtio_get_queue_type(hw, vq->vq_queue_index);
+ start = vq->vq_avail_idx & (vq->vq_nentries - 1);
+ end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
+
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ if (hw->use_simple_rx && type == VTNET_RQ) {
+ if (start <= end && idx >= start && idx < end)
+ continue;
+ if (start > end && (idx >= start || idx < end))
+ continue;
+ cookie = vq->sw_ring[idx];
+ if (cookie != NULL) {
+ vq->sw_ring[idx] = NULL;
+ return cookie;
+ }
+ } else {
cookie = vq->vq_descx[idx].cookie;
if (cookie != NULL) {
vq->vq_descx[idx].cookie = NULL;
return cookie;
}
}
+ }
+
return NULL;
}
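Note: the two "continue" tests above decide whether idx falls inside the half-open circular window [start, end) of descriptors the simple Rx path still owns; the window may wrap past the end of the ring, hence the two cases. A standalone restatement with worked checks (illustrative only):

#include <assert.h>
#include <stdint.h>

/* membership in a possibly wrapping half-open window [start, end) */
static int in_window(uint16_t idx, uint16_t start, uint16_t end)
{
	if (start <= end)	/* window does not wrap */
		return idx >= start && idx < end;
	return idx >= start || idx < end;	/* window wraps around */
}

int main(void)
{
	/* non-wrapping window [10, 20) */
	assert(in_window(10, 10, 20) && !in_window(20, 10, 20));
	/* wrapping window [250, 5) on a 256-entry ring */
	assert(in_window(250, 250, 5) && in_window(3, 250, 5));
	assert(!in_window(100, 250, 5));
	return 0;
}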
/* Flush the elements in the used ring. */
void
-virtqueue_flush(struct virtqueue *vq)
+virtqueue_rxvq_flush(struct virtqueue *vq)
{
+ struct virtnet_rx *rxq = &vq->rxq;
+ struct virtio_hw *hw = vq->hw;
struct vring_used_elem *uep;
struct vq_desc_extra *dxp;
uint16_t used_idx, desc_idx;
@@ -74,13 +70,27 @@ virtqueue_flush(struct virtqueue *vq)
for (i = 0; i < nb_used; i++) {
used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
uep = &vq->vq_ring.used->ring[used_idx];
- desc_idx = (uint16_t)uep->id;
- dxp = &vq->vq_descx[desc_idx];
- if (dxp->cookie != NULL) {
- rte_pktmbuf_free(dxp->cookie);
- dxp->cookie = NULL;
+ if (hw->use_simple_rx) {
+ desc_idx = used_idx;
+ rte_pktmbuf_free(vq->sw_ring[desc_idx]);
+ vq->vq_free_cnt++;
+ } else {
+ desc_idx = (uint16_t)uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq_ring_free_chain(vq, desc_idx);
}
vq->vq_used_cons_idx++;
- vq_ring_free_chain(vq, desc_idx);
+ }
+
+ if (hw->use_simple_rx) {
+ while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ virtio_rxq_rearm_vec(rxq);
+ if (virtqueue_kick_prepare(vq))
+ virtqueue_notify(vq);
+ }
}
}
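Note: after the flush, the simple Rx path refills the ring in fixed-size batches rather than one buffer at a time, looping while at least RTE_VIRTIO_VPMD_RX_REARM_THRESH slots are free. A standalone sketch of that batching shape (REARM_THRESH stands in for the real constant; illustrative only):

#include <stdint.h>
#include <stdio.h>

#define REARM_THRESH 32	/* stand-in for RTE_VIRTIO_VPMD_RX_REARM_THRESH */

int main(void)
{
	uint16_t free_cnt = 100;	/* slots freed by the flush */

	while (free_cnt >= REARM_THRESH) {
		/* a real rearm would post REARM_THRESH buffers here */
		free_cnt -= REARM_THRESH;
		printf("rearmed %d slots, %u still free\n",
		       REARM_THRESH, free_cnt);
	}
	return 0;
}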
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 2305d91a..14364f35 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _VIRTQUEUE_H_
@@ -158,6 +129,17 @@ struct virtio_net_ctrl_mac {
#define VIRTIO_NET_CTRL_VLAN_ADD 0
#define VIRTIO_NET_CTRL_VLAN_DEL 1
+/*
+ * Control link announce acknowledgement
+ *
+ * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
+ * the driver has received the notification; the device will clear the
+ * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
+ * this command.
+ */
+#define VIRTIO_NET_CTRL_ANNOUNCE 3
+#define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
+
struct virtio_net_ctrl_hdr {
uint8_t class;
uint8_t cmd;
@@ -226,8 +208,6 @@ struct virtqueue {
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
-#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
-
/**
* This is the first element of the scatter-gather list. If you don't
* specify GSO or CSUM features, you can simply ignore the header.
@@ -301,10 +281,10 @@ void virtqueue_dump(struct virtqueue *vq);
/**
* Get all mbufs to be freed.
*/
-struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
/* Flush the elements in the used ring. */
-void virtqueue_flush(struct virtqueue *vq);
+void virtqueue_rxvq_flush(struct virtqueue *vq);
static inline int
virtqueue_full(const struct virtqueue *vq)
@@ -312,6 +292,17 @@ virtqueue_full(const struct virtqueue *vq)
return vq->vq_free_cnt == 0;
}
+static inline int
+virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
+{
+ if (vtpci_queue_idx == hw->max_queue_pairs * 2)
+ return VTNET_CQ;
+ else if (vtpci_queue_idx % 2 == 0)
+ return VTNET_RQ;
+ else
+ return VTNET_TQ;
+}
+
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
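Note: the new virtio_get_queue_type() helper encodes the queue layout DPDK assumes: receive queues on even indexes, transmit queues on odd indexes, and the single control queue at index 2 * max_queue_pairs. A standalone restatement with a worked two-pair example (illustrative only):

#include <assert.h>
#include <stdint.h>

enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };

static int queue_type(uint16_t idx, uint16_t max_queue_pairs)
{
	if (idx == max_queue_pairs * 2)
		return VTNET_CQ;
	return (idx % 2 == 0) ? VTNET_RQ : VTNET_TQ;
}

int main(void)
{
	/* two queue pairs: RX0=0, TX0=1, RX1=2, TX1=3, CQ=4 */
	assert(queue_type(0, 2) == VTNET_RQ);
	assert(queue_type(1, 2) == VTNET_TQ);
	assert(queue_type(2, 2) == VTNET_RQ);
	assert(queue_type(3, 2) == VTNET_TQ);
	assert(queue_type(4, 2) == VTNET_CQ);
	return 0;
}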
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index f09de96e..6bfbf019 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2015 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/vmxnet3/base/upt1_defs.h b/drivers/net/vmxnet3/base/upt1_defs.h
index d9144e32..cf9141b2 100644
--- a/drivers/net/vmxnet3/base/upt1_defs.h
+++ b/drivers/net/vmxnet3/base/upt1_defs.h
@@ -1,27 +1,7 @@
/*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
*
*********************************************************/
diff --git a/drivers/net/vmxnet3/base/vmware_pack_begin.h b/drivers/net/vmxnet3/base/vmware_pack_begin.h
index 860ec4c3..df22590f 100644
--- a/drivers/net/vmxnet3/base/vmware_pack_begin.h
+++ b/drivers/net/vmxnet3/base/vmware_pack_begin.h
@@ -1,32 +1,3 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
diff --git a/drivers/net/vmxnet3/base/vmware_pack_end.h b/drivers/net/vmxnet3/base/vmware_pack_end.h
index 860ec4c3..df22590f 100644
--- a/drivers/net/vmxnet3/base/vmware_pack_end.h
+++ b/drivers/net/vmxnet3/base/vmware_pack_end.h
@@ -1,32 +1,3 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
diff --git a/drivers/net/vmxnet3/base/vmxnet3_defs.h b/drivers/net/vmxnet3/base/vmxnet3_defs.h
index bfa9622d..a455e270 100644
--- a/drivers/net/vmxnet3/base/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/base/vmxnet3_defs.h
@@ -1,27 +1,7 @@
/*********************************************************
* Copyright (C) 2007 VMware, Inc. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
*
*********************************************************/
diff --git a/drivers/net/vmxnet3/base/vmxnet3_osdep.h b/drivers/net/vmxnet3/base/vmxnet3_osdep.h
index b6e3469c..c9b92b04 100644
--- a/drivers/net/vmxnet3/base/vmxnet3_osdep.h
+++ b/drivers/net/vmxnet3/base/vmxnet3_osdep.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _VMXNET3_OSDEP_H
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 82d59ca8..4e68aae6 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <sys/queue.h>
@@ -56,7 +27,7 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
@@ -106,6 +77,9 @@ static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void vmxnet3_interrupt_handler(void *param);
+int vmxnet3_logtype_init;
+int vmxnet3_logtype_driver;
+
/*
* The set of PCI devices this driver supports
*/
@@ -1149,7 +1123,6 @@ vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
struct vmxnet3_hw *hw = dev->data->dev_private;
ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr));
- ether_addr_copy(mac_addr, &dev->data->mac_addrs[0]);
vmxnet3_write_mac(hw, mac_addr->addr_bytes);
}
@@ -1172,7 +1145,7 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ link.link_autoneg = ETH_LINK_AUTONEG;
}
vmxnet3_dev_atomic_write_link_status(dev, &link);
@@ -1332,7 +1305,7 @@ vmxnet3_process_events(struct rte_eth_dev *dev)
if (vmxnet3_dev_link_update(dev, 0) == 0)
_rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
/* Check if there is an error on xmit/recv queues */
@@ -1374,3 +1347,15 @@ vmxnet3_interrupt_handler(void *param)
RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(vmxnet3_init_log);
+static void
+vmxnet3_init_log(void)
+{
+ vmxnet3_logtype_init = rte_log_register("pmd.net.vmxnet3.init");
+ if (vmxnet3_logtype_init >= 0)
+ rte_log_set_level(vmxnet3_logtype_init, RTE_LOG_NOTICE);
+ vmxnet3_logtype_driver = rte_log_register("pmd.net.vmxnet3.driver");
+ if (vmxnet3_logtype_driver >= 0)
+ rte_log_set_level(vmxnet3_logtype_driver, RTE_LOG_NOTICE);
+}
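Note: the constructor above replaces the compile-time RTE_LIBRTE_VMXNET3_DEBUG_* options with dynamic logtypes, so verbosity can now be changed at runtime. A short sketch of how an application could raise the level (uses only existing rte_log APIs; the function name is hypothetical):

#include <rte_log.h>

static void
enable_vmxnet3_driver_debug(void)
{
	/* rte_log_register() returns the existing id if the name is
	 * already registered, so this is safe after PMD init */
	int lt = rte_log_register("pmd.net.vmxnet3.driver");

	if (lt >= 0)
		rte_log_set_level(lt, RTE_LOG_DEBUG);
}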
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index b48058af..b2a8cf35 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _VMXNET3_ETHDEV_H_
diff --git a/drivers/net/vmxnet3/vmxnet3_logs.h b/drivers/net/vmxnet3/vmxnet3_logs.h
index 82639a08..74154e3a 100644
--- a/drivers/net/vmxnet3/vmxnet3_logs.h
+++ b/drivers/net/vmxnet3/vmxnet3_logs.h
@@ -1,47 +1,15 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _VMXNET3_LOGS_H_
#define _VMXNET3_LOGS_H_
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_INIT
+extern int vmxnet3_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+	rte_log(RTE_LOG_ ## level, vmxnet3_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
-#define PMD_INIT_FUNC_TRACE() do { } while(0)
-#endif
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -64,11 +32,9 @@
#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
#endif
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+extern int vmxnet3_logtype_driver;
#define PMD_DRV_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
-#endif
+ rte_log(RTE_LOG_ ## level, vmxnet3_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
#endif /* _VMXNET3_LOGS_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
index a6fa93ac..552180e8 100644
--- a/drivers/net/vmxnet3/vmxnet3_ring.h
+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _VMXNET3_RING_H_
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index aa396ab2..3a8c62fc 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <sys/queue.h>
@@ -61,7 +32,7 @@
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_ip.h>
#include <rte_udp.h>
@@ -703,6 +674,8 @@ vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
if ((rcd->tcp || rcd->udp) && !rcd->tuc)
rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
+ } else {
+ rxm->packet_type = RTE_PTYPE_UNKNOWN;
}
}
diff --git a/drivers/raw/Makefile b/drivers/raw/Makefile
new file mode 100644
index 00000000..da7c8b44
--- /dev/null
+++ b/drivers/raw/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# DIRS-$(<configuration>) += <directory>
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton_rawdev
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/raw/skeleton_rawdev/Makefile b/drivers/raw/skeleton_rawdev/Makefile
new file mode 100644
index 00000000..bacc66dd
--- /dev/null
+++ b/drivers/raw/skeleton_rawdev/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_skeleton_rawdev.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+
+EXPORT_MAP := rte_pmd_skeleton_rawdev_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton_rawdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton_rawdev_test.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
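+
+# Illustrative only: with the legacy make build system this PMD is built
+# when CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV=y is set in the build
+# configuration (e.g. in config/common_base) before invoking make.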
diff --git a/drivers/raw/skeleton_rawdev/rte_pmd_skeleton_rawdev_version.map b/drivers/raw/skeleton_rawdev/rte_pmd_skeleton_rawdev_version.map
new file mode 100644
index 00000000..179140fb
--- /dev/null
+++ b/drivers/raw/skeleton_rawdev/rte_pmd_skeleton_rawdev_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+ local: *;
+};
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
new file mode 100644
index 00000000..6bdbbb50
--- /dev/null
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
@@ -0,0 +1,755 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_lcore.h>
+#include <rte_bus_vdev.h>
+
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include "skeleton_rawdev.h"
+
+/* Dynamic log type identifier */
+int skeleton_pmd_logtype;
+
+/* Flag: set once a device instance has been created */
+static uint16_t skeldev_init_once;
+
+/** Rawdev Skeleton dummy driver name */
+#define SKELETON_PMD_RAWDEV_NAME rawdev_skeleton
+
+/** Skeleton rawdev driver object */
+static struct rte_vdev_driver skeleton_pmd_drv;
+
+struct queue_buffers {
+ void *bufs[SKELETON_QUEUE_MAX_DEPTH];
+};
+
+static struct queue_buffers queue_buf[SKELETON_MAX_QUEUES] = {};
+static void clear_queue_bufs(int queue_id);
+
+static void skeleton_rawdev_info_get(struct rte_rawdev *dev,
+ rte_rawdev_obj_t dev_info)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_conf *skeldev_conf;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev_info) {
+ SKELETON_PMD_ERR("Invalid request");
+ return;
+ }
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ skeldev_conf = dev_info;
+
+ skeldev_conf->num_queues = skeldev->num_queues;
+ skeldev_conf->capabilities = skeldev->capabilities;
+ skeldev_conf->device_state = skeldev->device_state;
+ skeldev_conf->firmware_state = skeldev->fw.firmware_state;
+}
+
+static int skeleton_rawdev_configure(const struct rte_rawdev *dev,
+ rte_rawdev_obj_t config)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_conf *skeldev_conf;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ if (!config) {
+ SKELETON_PMD_ERR("Invalid configuration");
+ return -EINVAL;
+ }
+
+ skeldev_conf = config;
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+	if (skeldev_conf->num_queues > SKELETON_MAX_QUEUES)
+		return -EINVAL;
+
+	skeldev->num_queues = skeldev_conf->num_queues;
+	skeldev->capabilities = skeldev_conf->capabilities;
+
+ return 0;
+}
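+
+/* Illustrative only - a minimal sketch of how an application drives the
+ * configure hook above. The application's conf structure travels in the
+ * dev_private member of rte_rawdev_info; dev_id is an assumed, valid
+ * device id (see also test_rawdev_configure() in the selftest):
+ *
+ *	struct skeleton_rawdev_conf conf = { .num_queues = 1 };
+ *	struct rte_rawdev_info info = { .dev_private = &conf };
+ *
+ *	rte_rawdev_configure(dev_id, (rte_rawdev_obj_t)&info);
+ */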
+
+static int skeleton_rawdev_start(struct rte_rawdev *dev)
+{
+ int ret = 0;
+ struct skeleton_rawdev *skeldev;
+ enum skeleton_firmware_state fw_state;
+ enum skeleton_device_state device_state;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ fw_state = skeldev->fw.firmware_state;
+ device_state = skeldev->device_state;
+
+ if (fw_state == SKELETON_FW_LOADED &&
+ device_state == SKELETON_DEV_STOPPED) {
+ skeldev->device_state = SKELETON_DEV_RUNNING;
+ } else {
+ SKELETON_PMD_ERR("Device not ready for starting");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void skeleton_rawdev_stop(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (dev) {
+ skeldev = skeleton_rawdev_get_priv(dev);
+ skeldev->device_state = SKELETON_DEV_STOPPED;
+ }
+}
+
+static void
+reset_queues(struct skeleton_rawdev *skeldev)
+{
+ int i;
+
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
+ skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
+ skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
+ }
+}
+
+static void
+reset_attribute_table(struct skeleton_rawdev *skeldev)
+{
+ int i;
+
+ for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
+ if (skeldev->attr[i].name) {
+ free(skeldev->attr[i].name);
+ skeldev->attr[i].name = NULL;
+ }
+ }
+}
+
+static int skeleton_rawdev_close(struct rte_rawdev *dev)
+{
+ int ret = 0, i;
+ struct skeleton_rawdev *skeldev;
+ enum skeleton_firmware_state fw_state;
+ enum skeleton_device_state device_state;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ fw_state = skeldev->fw.firmware_state;
+ device_state = skeldev->device_state;
+
+ reset_queues(skeldev);
+ reset_attribute_table(skeldev);
+
+ switch (fw_state) {
+ case SKELETON_FW_LOADED:
+ if (device_state == SKELETON_DEV_RUNNING) {
+ SKELETON_PMD_ERR("Cannot close running device");
+ ret = -EINVAL;
+ } else {
+			/* A real driver would trigger a firmware reset here */
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+ }
+ break;
+ case SKELETON_FW_READY:
+ case SKELETON_FW_ERROR:
+ default:
+ SKELETON_PMD_DEBUG("Device already in stopped state");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Clear all allocated queues */
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++)
+ clear_queue_bufs(i);
+
+ return ret;
+}
+
+static int skeleton_rawdev_reset(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ SKELETON_PMD_DEBUG("Resetting device");
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+
+ return 0;
+}
+
+static void skeleton_rawdev_queue_def_conf(struct rte_rawdev *dev,
+ uint16_t queue_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_queue *skelq;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !queue_conf)
+ return;
+
+	if (queue_id >= SKELETON_MAX_QUEUES)
+		return;
+
+	skeldev = skeleton_rawdev_get_priv(dev);
+	skelq = &skeldev->queues[queue_id];
+
+	rte_memcpy(queue_conf, skelq,
+		   sizeof(struct skeleton_rawdev_queue));
+}
+
+static void
+clear_queue_bufs(int queue_id)
+{
+ int i;
+
+ /* Clear buffers for queue_id */
+ for (i = 0; i < SKELETON_QUEUE_MAX_DEPTH; i++)
+ queue_buf[queue_id].bufs[i] = NULL;
+}
+
+static int skeleton_rawdev_queue_setup(struct rte_rawdev *dev,
+ uint16_t queue_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ int ret = 0;
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_rawdev_queue *q;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !queue_conf)
+ return -EINVAL;
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ q = &skeldev->queues[queue_id];
+
+ if (skeldev->num_queues > queue_id &&
+ q->depth < SKELETON_QUEUE_MAX_DEPTH) {
+ rte_memcpy(q, queue_conf,
+ sizeof(struct skeleton_rawdev_queue));
+ clear_queue_bufs(queue_id);
+ } else {
+ SKELETON_PMD_ERR("Invalid queue configuration");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int skeleton_rawdev_queue_release(struct rte_rawdev *dev,
+ uint16_t queue_id)
+{
+ int ret = 0;
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ if (skeldev->num_queues > queue_id) {
+ skeldev->queues[queue_id].state = SKELETON_QUEUE_DETACH;
+ skeldev->queues[queue_id].depth = SKELETON_QUEUE_DEF_DEPTH;
+ clear_queue_bufs(queue_id);
+ } else {
+ SKELETON_PMD_ERR("Invalid queue configuration");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
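+
+/* Illustrative only - queue setup/release as driven through the rawdev
+ * API, mirroring the selftest (dev_id is an assumed, valid device id):
+ *
+ *	struct skeleton_rawdev_queue q = {
+ *		.state = SKELETON_QUEUE_ATTACH,
+ *		.depth = 15,
+ *	};
+ *
+ *	rte_rawdev_queue_setup(dev_id, 0, &q);
+ *	rte_rawdev_queue_release(dev_id, 0);
+ */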
+
+static int skeleton_rawdev_get_attr(struct rte_rawdev *dev,
+ const char *attr_name,
+ uint64_t *attr_value)
+{
+ int i;
+ uint8_t done = 0;
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !attr_name || !attr_value) {
+ SKELETON_PMD_ERR("Invalid arguments for getting attributes");
+ return -EINVAL;
+ }
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
+ if (!skeldev->attr[i].name)
+ continue;
+
+ if (!strncmp(skeldev->attr[i].name, attr_name,
+ SKELETON_ATTRIBUTE_NAME_MAX)) {
+ *attr_value = skeldev->attr[i].value;
+ done = 1;
+ SKELETON_PMD_DEBUG("Attribute (%s) Value (%" PRIu64 ")",
+ attr_name, *attr_value);
+ break;
+ }
+ }
+
+ if (done)
+ return 0;
+
+ /* Attribute not found */
+ return -EINVAL;
+}
+
+static int skeleton_rawdev_set_attr(struct rte_rawdev *dev,
+ const char *attr_name,
+ const uint64_t attr_value)
+{
+ int i;
+ uint8_t done = 0;
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ if (!dev || !attr_name) {
+ SKELETON_PMD_ERR("Invalid arguments for setting attributes");
+ return -EINVAL;
+ }
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ /* Check if attribute already exists */
+ for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
+ if (!skeldev->attr[i].name)
+ break;
+
+ if (!strncmp(skeldev->attr[i].name, attr_name,
+ SKELETON_ATTRIBUTE_NAME_MAX)) {
+ /* Update value */
+ skeldev->attr[i].value = attr_value;
+ done = 1;
+ break;
+ }
+ }
+
+ if (!done) {
+		if (i < SKELETON_MAX_ATTRIBUTES) {
+ /* There is still space to insert one more */
+ skeldev->attr[i].name = strdup(attr_name);
+ if (!skeldev->attr[i].name)
+ return -ENOMEM;
+
+ skeldev->attr[i].value = attr_value;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
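+
+/* Illustrative only - how an application exercises the attribute table
+ * managed by the two hooks above (dev_id is an assumed, valid device id
+ * and "my_attr" a hypothetical attribute name):
+ *
+ *	uint64_t value;
+ *
+ *	rte_rawdev_set_attr(dev_id, "my_attr", 100);
+ *	rte_rawdev_get_attr(dev_id, "my_attr", &value);  // value == 100
+ */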
+
+static int skeleton_rawdev_enqueue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ unsigned int i;
+ uint16_t q_id;
+ RTE_SET_USED(dev);
+
+	/* The context is essentially the queue_id, transferred as an opaque
+	 * object through the library layer. This can help complex
+	 * implementations which require more information than just an
+	 * integer - for example, a queue-pair.
+	 */
+	q_id = *((uint16_t *)context);
+
+ for (i = 0; i < count; i++)
+ queue_buf[q_id].bufs[i] = buffers[i]->buf_addr;
+
+ return i;
+}
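+
+/* Illustrative only - a sketch of a caller handing the queue id to the
+ * hook above through the opaque context (dev_id and payload are
+ * assumptions for the example):
+ *
+ *	uint16_t q_id = 0;
+ *	struct rte_rawdev_buf buf = { .buf_addr = payload };
+ *	struct rte_rawdev_buf *bufs[1] = { &buf };
+ *
+ *	rte_rawdev_enqueue_buffers(dev_id, bufs, 1, &q_id);
+ */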
+
+static int skeleton_rawdev_dequeue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ unsigned int i;
+ uint16_t q_id;
+ RTE_SET_USED(dev);
+
+	/* The context is essentially the queue_id, transferred as an opaque
+	 * object through the library layer. This can help complex
+	 * implementations which require more information than just an
+	 * integer - for example, a queue-pair.
+	 */
+	q_id = *((uint16_t *)context);
+
+ for (i = 0; i < count; i++)
+ buffers[i]->buf_addr = queue_buf[q_id].bufs[i];
+
+ return i;
+}
+
+static int skeleton_rawdev_dump(struct rte_rawdev *dev, FILE *f)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(f);
+
+ return 0;
+}
+
+static int skeleton_rawdev_firmware_status_get(struct rte_rawdev *dev,
+ rte_rawdev_obj_t status_info)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+	skeldev = skeleton_rawdev_get_priv(dev);
+
+ if (status_info)
+ memcpy(status_info, &skeldev->fw.firmware_state,
+ sizeof(enum skeleton_firmware_state));
+
+ return 0;
+}
+
+static int skeleton_rawdev_firmware_version_get(
+ struct rte_rawdev *dev,
+ rte_rawdev_obj_t version_info)
+{
+ struct skeleton_rawdev *skeldev;
+ struct skeleton_firmware_version_info *vi;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ vi = version_info;
+
+ vi->major = skeldev->fw.firmware_version.major;
+ vi->minor = skeldev->fw.firmware_version.minor;
+ vi->subrel = skeldev->fw.firmware_version.subrel;
+
+ return 0;
+}
+
+static int skeleton_rawdev_firmware_load(struct rte_rawdev *dev,
+ rte_rawdev_obj_t firmware_buf)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+	/* firmware_buf would be an mmap'd, possibly DMA-able, buffer. Being a
+	 * dummy driver, all this does is check that firmware_buf is not NULL
+	 * and set the firmware state accordingly.
+	 */
+ if (!firmware_buf)
+ return -EINVAL;
+
+ skeldev->fw.firmware_state = SKELETON_FW_LOADED;
+
+ return 0;
+}
+
+static int skeleton_rawdev_firmware_unload(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+
+ return 0;
+}
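+
+/* Illustrative only - loading and unloading a (dummy) firmware image via
+ * the two hooks above, as the selftest also does (dev_id and size are
+ * assumptions for the example):
+ *
+ *	void *image = rte_zmalloc("fw_image", size, 0);
+ *
+ *	rte_rawdev_firmware_load(dev_id, image);
+ *	rte_rawdev_firmware_unload(dev_id);
+ */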
+
+static const struct rte_rawdev_ops skeleton_rawdev_ops = {
+ .dev_info_get = skeleton_rawdev_info_get,
+ .dev_configure = skeleton_rawdev_configure,
+ .dev_start = skeleton_rawdev_start,
+ .dev_stop = skeleton_rawdev_stop,
+ .dev_close = skeleton_rawdev_close,
+ .dev_reset = skeleton_rawdev_reset,
+
+ .queue_def_conf = skeleton_rawdev_queue_def_conf,
+ .queue_setup = skeleton_rawdev_queue_setup,
+ .queue_release = skeleton_rawdev_queue_release,
+
+ .attr_get = skeleton_rawdev_get_attr,
+ .attr_set = skeleton_rawdev_set_attr,
+
+ .enqueue_bufs = skeleton_rawdev_enqueue_bufs,
+ .dequeue_bufs = skeleton_rawdev_dequeue_bufs,
+
+ .dump = skeleton_rawdev_dump,
+
+ .xstats_get = NULL,
+ .xstats_get_names = NULL,
+ .xstats_get_by_name = NULL,
+ .xstats_reset = NULL,
+
+ .firmware_status_get = skeleton_rawdev_firmware_status_get,
+ .firmware_version_get = skeleton_rawdev_firmware_version_get,
+ .firmware_load = skeleton_rawdev_firmware_load,
+ .firmware_unload = skeleton_rawdev_firmware_unload,
+
+ .dev_selftest = test_rawdev_skeldev,
+};
+
+static int
+skeleton_rawdev_create(const char *name,
+ struct rte_vdev_device *vdev,
+ int socket_id)
+{
+ int ret = 0, i;
+ struct rte_rawdev *rawdev = NULL;
+ struct skeleton_rawdev *skeldev = NULL;
+
+ if (!name) {
+ SKELETON_PMD_ERR("Invalid name of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct skeleton_rawdev),
+ socket_id);
+ if (rawdev == NULL) {
+ SKELETON_PMD_ERR("Unable to allocate rawdevice");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ rawdev->dev_ops = &skeleton_rawdev_ops;
+ rawdev->device = &vdev->device;
+ rawdev->driver_name = vdev->device.driver->name;
+
+ skeldev = skeleton_rawdev_get_priv(rawdev);
+
+ skeldev->device_id = SKELETON_DEVICE_ID;
+ skeldev->vendor_id = SKELETON_VENDOR_ID;
+ skeldev->capabilities = SKELETON_DEFAULT_CAPA;
+
+ memset(&skeldev->fw, 0, sizeof(struct skeleton_firmware));
+
+ skeldev->fw.firmware_state = SKELETON_FW_READY;
+ skeldev->fw.firmware_version.major = SKELETON_MAJOR_VER;
+ skeldev->fw.firmware_version.minor = SKELETON_MINOR_VER;
+ skeldev->fw.firmware_version.subrel = SKELETON_SUB_VER;
+
+ skeldev->device_state = SKELETON_DEV_STOPPED;
+
+ /* Reset/set to default queue configuration for this device */
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
+ skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
+ skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
+ }
+
+ /* Clear all allocated queue buffers */
+ for (i = 0; i < SKELETON_MAX_QUEUES; i++)
+ clear_queue_bufs(i);
+
+ return ret;
+
+cleanup:
+ if (rawdev)
+ rte_rawdev_pmd_release(rawdev);
+
+ return ret;
+}
+
+static int
+skeleton_rawdev_destroy(const char *name)
+{
+ int ret;
+ struct rte_rawdev *rdev;
+
+ if (!name) {
+ SKELETON_PMD_ERR("Invalid device name");
+ return -EINVAL;
+ }
+
+ rdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rdev) {
+ SKELETON_PMD_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ /* rte_rawdev_close is called by pmd_release */
+ ret = rte_rawdev_pmd_release(rdev);
+ if (ret)
+ SKELETON_PMD_DEBUG("Device cleanup failed");
+
+ return 0;
+}
+
+static int
+skeldev_get_selftest(const char *key __rte_unused,
+ const char *value,
+ void *opaque)
+{
+ int *flag = opaque;
+ *flag = atoi(value);
+ return 0;
+}
+
+static int
+skeldev_parse_vdev_args(struct rte_vdev_device *vdev)
+{
+ int selftest = 0;
+ const char *name;
+ const char *params;
+
+ static const char *const args[] = {
+ SKELETON_SELFTEST_ARG,
+ NULL
+ };
+
+ name = rte_vdev_device_name(vdev);
+
+ params = rte_vdev_device_args(vdev);
+ if (params != NULL && params[0] != '\0') {
+ struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+ if (!kvlist) {
+ SKELETON_PMD_INFO(
+ "Ignoring unsupported params supplied '%s'",
+ name);
+ } else {
+ int ret = rte_kvargs_process(kvlist,
+ SKELETON_SELFTEST_ARG,
+ skeldev_get_selftest, &selftest);
+ if (ret != 0 || (selftest < 0 || selftest > 1)) {
+ SKELETON_PMD_ERR("%s: Error in parsing args",
+ name);
+ rte_kvargs_free(kvlist);
+				/* Fail probe on an invalid selftest value */
+				return -1;
+ }
+ }
+
+ rte_kvargs_free(kvlist);
+ }
+
+ return selftest;
+}
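+
+/* Illustrative only: the 'selftest' argument parsed above is supplied as a
+ * vdev argument on the EAL command line, e.g. (application name assumed):
+ *
+ *	./app --vdev="rawdev_skeleton,selftest=1"
+ */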
+
+static int
+skeleton_rawdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int selftest = 0, ret = 0;
+
+ name = rte_vdev_device_name(vdev);
+ /* More than one instance is not supported */
+ if (skeldev_init_once) {
+		SKELETON_PMD_ERR("Multiple instances not supported for %s",
+ name);
+ return -EINVAL;
+ }
+
+ SKELETON_PMD_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ selftest = skeldev_parse_vdev_args(vdev);
+	/* On invalid arguments, selftest != 1 and the selftest is skipped */
+
+ ret = skeleton_rawdev_create(name, vdev, rte_socket_id());
+ if (!ret) {
+		/* Run the selftest if it was requested via the 'selftest'
+		 * command line argument; on invalid arguments, execution
+		 * continues without the selftest.
+		 */
+ if (selftest == 1)
+ test_rawdev_skeldev();
+ }
+
+	/* Device instance created; a second instance is not possible */
+	if (!ret)
+		skeldev_init_once = 1;
+
+ return ret;
+}
+
+static int
+skeleton_rawdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+
+ SKELETON_PMD_INFO("Closing %s on NUMA node %d", name, rte_socket_id());
+
+ ret = skeleton_rawdev_destroy(name);
+ if (!ret)
+ skeldev_init_once = 0;
+
+ return ret;
+}
+
+static struct rte_vdev_driver skeleton_pmd_drv = {
+ .probe = skeleton_rawdev_probe,
+ .remove = skeleton_rawdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(SKELETON_PMD_RAWDEV_NAME, skeleton_pmd_drv);
+
+RTE_INIT(skeleton_pmd_init_log);
+
+static void
+skeleton_pmd_init_log(void)
+{
+ skeleton_pmd_logtype = rte_log_register("rawdev.skeleton");
+ if (skeleton_pmd_logtype >= 0)
+ rte_log_set_level(skeleton_pmd_logtype, RTE_LOG_INFO);
+}
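+
+/* Illustrative only: an application wanting the driver's DEBUG traces can
+ * raise the dynamic log level after rte_eal_init(), e.g.:
+ *
+ *	rte_log_set_level(skeleton_pmd_logtype, RTE_LOG_DEBUG);
+ */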
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev.h b/drivers/raw/skeleton_rawdev/skeleton_rawdev.h
new file mode 100644
index 00000000..5045b592
--- /dev/null
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef __SKELETON_RAWDEV_H__
+#define __SKELETON_RAWDEV_H__
+
+#include <rte_rawdev.h>
+
+extern int skeleton_pmd_logtype;
+
+#define SKELETON_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, skeleton_pmd_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define SKELETON_PMD_FUNC_TRACE() SKELETON_PMD_LOG(DEBUG, ">>")
+
+#define SKELETON_PMD_DEBUG(fmt, args...) \
+ SKELETON_PMD_LOG(DEBUG, fmt, ## args)
+#define SKELETON_PMD_INFO(fmt, args...) \
+ SKELETON_PMD_LOG(INFO, fmt, ## args)
+#define SKELETON_PMD_ERR(fmt, args...) \
+ SKELETON_PMD_LOG(ERR, fmt, ## args)
+#define SKELETON_PMD_WARN(fmt, args...) \
+ SKELETON_PMD_LOG(WARNING, fmt, ## args)
+/* Macros for self test application */
+#define SKELETON_TEST_INFO SKELETON_PMD_INFO
+#define SKELETON_TEST_DEBUG SKELETON_PMD_DEBUG
+#define SKELETON_TEST_ERR SKELETON_PMD_ERR
+#define SKELETON_TEST_WARN SKELETON_PMD_WARN
+
+#define SKELETON_SELFTEST_ARG ("selftest")
+
+#define SKELETON_VENDOR_ID 0x10
+#define SKELETON_DEVICE_ID 0x01
+
+#define SKELETON_MAJOR_VER 1
+#define SKELETON_MINOR_VER 0
+#define SKELETON_SUB_VER 0
+
+#define SKELETON_MAX_QUEUES 1
+
+enum skeleton_firmware_state {
+ SKELETON_FW_READY,
+ SKELETON_FW_LOADED,
+ SKELETON_FW_ERROR
+};
+
+enum skeleton_device_state {
+ SKELETON_DEV_RUNNING,
+ SKELETON_DEV_STOPPED
+};
+
+enum skeleton_queue_state {
+ SKELETON_QUEUE_DETACH,
+ SKELETON_QUEUE_ATTACH
+};
+
+#define SKELETON_QUEUE_DEF_DEPTH 10
+#define SKELETON_QUEUE_MAX_DEPTH 25
+
+struct skeleton_firmware_version_info {
+ uint8_t major;
+ uint8_t minor;
+ uint8_t subrel;
+};
+
+struct skeleton_firmware {
+	/** Device firmware version information */
+	struct skeleton_firmware_version_info firmware_version;
+	/** Firmware state */
+	enum skeleton_firmware_state firmware_state;
+};
+
+#define SKELETON_MAX_ATTRIBUTES 10
+#define SKELETON_ATTRIBUTE_NAME_MAX 20
+
+struct skeleton_rawdev_attributes {
+	/** Name of the attribute */
+	char *name;
+	/** Value, or a reference to the value, of the attribute */
+	uint64_t value;
+};
+
+/** Device supports firmware loading/unloading */
+#define SKELETON_CAPA_FW_LOAD	0x0001
+/** Device supports firmware reset */
+#define SKELETON_CAPA_FW_RESET	0x0002
+/** Device supports queue based communication */
+#define SKELETON_CAPA_QUEUES	0x0004
+/** Default capabilities: FW_LOAD, FW_RESET and QUEUES */
+#define SKELETON_DEFAULT_CAPA	(SKELETON_CAPA_FW_LOAD | \
+				 SKELETON_CAPA_FW_RESET | \
+				 SKELETON_CAPA_QUEUES)
+
+struct skeleton_rawdev_queue {
+ uint8_t state;
+ uint32_t depth;
+};
+
+struct skeleton_rawdev {
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t num_queues;
+	/** Bitmask of SKELETON_CAPA_* flags */
+	uint16_t capabilities;
+	/** State of device; linked to firmware state */
+	enum skeleton_device_state device_state;
+	/** Firmware configuration */
+	struct skeleton_firmware fw;
+	/** Collection of all communication channels - which can be referred
+	 * to as queues.
+	 */
+	struct skeleton_rawdev_queue queues[SKELETON_MAX_QUEUES];
+	/** Global table containing various pre-defined and user-defined
+	 * attributes.
+	 */
+ struct skeleton_rawdev_attributes attr[SKELETON_MAX_ATTRIBUTES];
+ struct rte_device *device;
+};
+
+struct skeleton_rawdev_conf {
+ uint16_t num_queues;
+ unsigned int capabilities;
+ enum skeleton_device_state device_state;
+ enum skeleton_firmware_state firmware_state;
+};
+
+static inline struct skeleton_rawdev *
+skeleton_rawdev_get_priv(const struct rte_rawdev *rawdev)
+{
+ return rawdev->dev_private;
+}
+
+int test_rawdev_skeldev(void);
+
+#endif /* __SKELETON_RAWDEV_H__ */
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
new file mode 100644
index 00000000..795f24bc
--- /dev/null
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_dev.h>
+#include <rte_rawdev.h>
+#include <rte_bus_vdev.h>
+#include <rte_test.h>
+
+/* Use a relative path, as skeleton_rawdev.h is not an exported header */
+#include "skeleton_rawdev.h"
+
+#define TEST_DEV_ID 0
+#define TEST_DEV_NAME "rawdev_skeleton"
+
+#define SKELDEV_LOGS(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, skeleton_pmd_logtype, fmt "\n", \
+ ##args)
+
+#define SKELDEV_TEST_INFO(fmt, args...) \
+ SKELDEV_LOGS(INFO, fmt, ## args)
+#define SKELDEV_TEST_DEBUG(fmt, args...) \
+ SKELDEV_LOGS(DEBUG, fmt, ## args)
+
+#define SKELDEV_TEST_RUN(setup, teardown, test) \
+ skeldev_test_run(setup, teardown, test, #test)
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED -1
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int
+testsuite_setup(void)
+{
+ uint8_t count;
+ count = rte_rawdev_count();
+ if (!count) {
+		SKELDEV_TEST_INFO("\tNo existing rawdev; "
+				  "Creating '" TEST_DEV_NAME "'");
+ return rte_vdev_init(TEST_DEV_NAME, NULL);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static void local_teardown(void);
+
+static void
+testsuite_teardown(void)
+{
+ local_teardown();
+}
+
+static void
+local_teardown(void)
+{
+ rte_vdev_uninit(TEST_DEV_NAME);
+}
+
+static int
+test_rawdev_count(void)
+{
+ uint8_t count;
+ count = rte_rawdev_count();
+ RTE_TEST_ASSERT(count > 0, "Invalid rawdev count %" PRIu8, count);
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_get_dev_id(void)
+{
+ int ret;
+ ret = rte_rawdev_get_dev_id("invalid_rawdev_device");
+ RTE_TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d",
+ ret);
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_socket_id(void)
+{
+ int socket_id;
+ socket_id = rte_rawdev_socket_id(TEST_DEV_ID);
+ RTE_TEST_ASSERT(socket_id != -EINVAL,
+ "Failed to get socket_id %d", socket_id);
+ socket_id = rte_rawdev_socket_id(RTE_RAWDEV_MAX_DEVS);
+ RTE_TEST_ASSERT(socket_id == -EINVAL,
+ "Expected -EINVAL %d", socket_id);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_info_get(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf skel_conf = {0};
+
+ ret = rte_rawdev_info_get(TEST_DEV_ID, NULL);
+ RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ rdev_info.dev_private = &skel_conf;
+
+ ret = rte_rawdev_info_get(TEST_DEV_ID, &rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get raw dev info");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_configure(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_set = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+
+ /* Check invalid configuration */
+ ret = rte_rawdev_configure(TEST_DEV_ID, NULL);
+ RTE_TEST_ASSERT(ret == -EINVAL,
+ "Null configure; Expected -EINVAL, got %d", ret);
+
+ /* Valid configuration test */
+ rdev_conf_set.num_queues = 1;
+ rdev_conf_set.capabilities = SKELETON_CAPA_FW_LOAD |
+ SKELETON_CAPA_FW_RESET;
+
+ rdev_info.dev_private = &rdev_conf_set;
+ ret = rte_rawdev_configure(TEST_DEV_ID,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure rawdev (%d)", ret);
+
+ rdev_info.dev_private = &rdev_conf_get;
+ ret = rte_rawdev_info_get(TEST_DEV_ID,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_set.num_queues,
+ rdev_conf_get.num_queues,
+ "Configuration test failed; num_queues (%d)(%d)",
+ rdev_conf_set.num_queues,
+ rdev_conf_get.num_queues);
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_set.capabilities,
+ rdev_conf_get.capabilities,
+ "Configuration test failed; capabilities");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_queue_default_conf_get(void)
+{
+ int ret, i;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+ struct skeleton_rawdev_queue q = {0};
+
+ /* Get the current configuration */
+ rdev_info.dev_private = &rdev_conf_get;
+ ret = rte_rawdev_info_get(TEST_DEV_ID,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain rawdev configuration (%d)",
+ ret);
+
+ /* call to test_rawdev_configure would have set the num_queues = 1 */
+	RTE_TEST_ASSERT(rdev_conf_get.num_queues > 0,
+			"Invalid number of queues (%d). Expected 1",
+			rdev_conf_get.num_queues);
+ /* All queues by default should have state = DETACH and
+ * depth = DEF_DEPTH
+ */
+ for (i = 0; i < rdev_conf_get.num_queues; i++) {
+ rte_rawdev_queue_conf_get(TEST_DEV_ID, i, &q);
+ RTE_TEST_ASSERT_EQUAL(q.depth, SKELETON_QUEUE_DEF_DEPTH,
+ "Invalid default depth of queue (%d)",
+ q.depth);
+ RTE_TEST_ASSERT_EQUAL(q.state, SKELETON_QUEUE_DETACH,
+ "Invalid default state of queue (%d)",
+ q.state);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_queue_setup(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+ struct skeleton_rawdev_queue qset = {0};
+ struct skeleton_rawdev_queue qget = {0};
+
+ /* Get the current configuration */
+ rdev_info.dev_private = &rdev_conf_get;
+ ret = rte_rawdev_info_get(TEST_DEV_ID,
+ (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+
+ /* call to test_rawdev_configure would have set the num_queues = 1 */
+	RTE_TEST_ASSERT(rdev_conf_get.num_queues > 0,
+			"Invalid number of queues (%d). Expected 1",
+			rdev_conf_get.num_queues);
+
+ /* Modify the queue depth for Queue 0 and attach it */
+ qset.depth = 15;
+ qset.state = SKELETON_QUEUE_ATTACH;
+ ret = rte_rawdev_queue_setup(TEST_DEV_ID, 0, &qset);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue (%d)", ret);
+
+ /* Now, fetching the queue 0 should show depth as 15 */
+ ret = rte_rawdev_queue_conf_get(TEST_DEV_ID, 0, &qget);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get queue config (%d)", ret);
+
+ RTE_TEST_ASSERT_EQUAL(qset.depth, qget.depth,
+ "Failed to set queue depth: Need(%d), has(%d)",
+ qset.depth, qget.depth);
+
+ return TEST_SUCCESS;
+}
+
+/* After executing test_rawdev_queue_setup, queue_id=0 would have depth as 15.
+ * Releasing should set it back to default. state would set to DETACH
+ */
+static int
+test_rawdev_queue_release(void)
+{
+ int ret;
+ struct skeleton_rawdev_queue qget = {0};
+
+	/* Release queue 0; this should reset it to the defaults */
+ ret = rte_rawdev_queue_release(TEST_DEV_ID, 0);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to release queue 0; (%d)", ret);
+
+ /* Now, fetching the queue 0 should show depth as default */
+ ret = rte_rawdev_queue_conf_get(TEST_DEV_ID, 0, &qget);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get queue config (%d)", ret);
+
+ RTE_TEST_ASSERT_EQUAL(qget.depth, SKELETON_QUEUE_DEF_DEPTH,
+ "Release of Queue 0 failed; (depth)");
+
+ RTE_TEST_ASSERT_EQUAL(qget.state, SKELETON_QUEUE_DETACH,
+ "Release of Queue 0 failed; (state)");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_attr_set_get(void)
+{
+ int ret;
+ int *dummy_value;
+ uint64_t ret_value;
+
+ /* Set an attribute and fetch it */
+ ret = rte_rawdev_set_attr(TEST_DEV_ID, "Test1", 100);
+ RTE_TEST_ASSERT(!ret, "Unable to set an attribute (Test1)");
+
+	dummy_value = malloc(sizeof(int));
+	RTE_TEST_ASSERT(dummy_value != NULL,
+			"Unable to allocate memory (dummy_value)");
+
+	*dummy_value = 200;
+	ret = rte_rawdev_set_attr(TEST_DEV_ID, "Test2", (uintptr_t)dummy_value);
+	RTE_TEST_ASSERT(!ret, "Unable to set an attribute (Test2)");
+
+ /* Check if attributes have been set */
+ ret = rte_rawdev_get_attr(TEST_DEV_ID, "Test1", &ret_value);
+ RTE_TEST_ASSERT_EQUAL(ret_value, 100,
+ "Attribute (Test1) not set correctly (%" PRIu64 ")",
+ ret_value);
+
+ ret_value = 0;
+ ret = rte_rawdev_get_attr(TEST_DEV_ID, "Test2", &ret_value);
+ RTE_TEST_ASSERT_EQUAL(*((int *)(uintptr_t)ret_value), 200,
+ "Attribute (Test2) not set correctly (%" PRIu64 ")",
+ ret_value);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_start_stop(void)
+{
+ int ret;
+ struct rte_rawdev_info rdev_info = {0};
+ struct skeleton_rawdev_conf rdev_conf_get = {0};
+ char *dummy_firmware = NULL;
+
+ /* Get the current configuration */
+ rdev_info.dev_private = &rdev_conf_get;
+
+ /* Load a firmware using a dummy address area */
+ dummy_firmware = rte_zmalloc("RAWDEV SKELETON", sizeof(int) * 10, 0);
+ RTE_TEST_ASSERT(dummy_firmware != NULL,
+ "Failed to create firmware memory backing");
+
+ ret = rte_rawdev_firmware_load(TEST_DEV_ID, dummy_firmware);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Firmware loading failed (%d)", ret);
+
+ /* Skeleton doesn't do anything with the firmware area - that is dummy
+ * and can be removed.
+ */
+ rte_free(dummy_firmware);
+ dummy_firmware = NULL;
+
+ rte_rawdev_start(TEST_DEV_ID);
+ ret = rte_rawdev_info_get(TEST_DEV_ID, (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_get.device_state, SKELETON_DEV_RUNNING,
+ "Device start failed. State is (%d)",
+ rdev_conf_get.device_state);
+
+ rte_rawdev_stop(TEST_DEV_ID);
+ ret = rte_rawdev_info_get(TEST_DEV_ID, (rte_rawdev_obj_t)&rdev_info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to obtain rawdev configuration (%d)",
+ ret);
+ RTE_TEST_ASSERT_EQUAL(rdev_conf_get.device_state, SKELETON_DEV_STOPPED,
+ "Device stop failed. State is (%d)",
+ rdev_conf_get.device_state);
+
+ /* Unloading the firmware once device is stopped */
+ ret = rte_rawdev_firmware_unload(TEST_DEV_ID);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Failed to unload firmware (%d)", ret);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_rawdev_enqdeq(void)
+{
+ int ret;
+ unsigned int count = 1;
+ uint16_t queue_id = 0;
+	struct rte_rawdev_buf buffer;
+	struct rte_rawdev_buf *buffers[1] = { &buffer };
+	struct rte_rawdev_buf *deq_buffers = NULL;
+
+	buffers[0]->buf_addr = malloc(strlen(TEST_DEV_NAME) + 3);
+	if (!buffers[0]->buf_addr)
+		goto cleanup;
+	snprintf(buffers[0]->buf_addr, strlen(TEST_DEV_NAME) + 2, "%s%d",
+		 TEST_DEV_NAME, 0);
+
+	/* The PMD expects an array of buffer pointers; pass it directly
+	 * rather than casting the address of a struct array.
+	 */
+	ret = rte_rawdev_enqueue_buffers(TEST_DEV_ID, buffers,
+					 count, &queue_id);
+ RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count,
+ "Unable to enqueue buffers");
+
+ deq_buffers = malloc(sizeof(struct rte_rawdev_buf) * count);
+ if (!deq_buffers)
+ goto cleanup;
+
+ ret = rte_rawdev_dequeue_buffers(TEST_DEV_ID,
+ (struct rte_rawdev_buf **)&deq_buffers,
+ count, &queue_id);
+ RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count,
+ "Unable to dequeue buffers");
+
+	free(buffers[0]->buf_addr);
+	free(deq_buffers);
+
+	return TEST_SUCCESS;
+cleanup:
+	if (buffers[0]->buf_addr)
+		free(buffers[0]->buf_addr);
+ if (deq_buffers)
+ free(deq_buffers);
+
+ return TEST_FAILED;
+}
+
+static void skeldev_test_run(int (*setup)(void),
+ void (*teardown)(void),
+ int (*test)(void),
+ const char *name)
+{
+ int ret = 0;
+
+	if (setup) {
+		ret = setup();
+		if (ret < 0) {
+			SKELDEV_TEST_INFO("Error setting up test %s", name);
+			unsupported++;
+			goto end;
+		}
+	}
+
+ if (test) {
+ ret = test();
+ if (ret < 0) {
+ failed++;
+ SKELDEV_TEST_INFO("%s Failed", name);
+ } else {
+ passed++;
+ SKELDEV_TEST_DEBUG("%s Passed", name);
+ }
+ }
+
+end:
+	if (teardown)
+		teardown();
+
+	total++;
+}
+
+int
+test_rawdev_skeldev(void)
+{
+ testsuite_setup();
+
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_count);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_get_dev_id);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_socket_id);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_info_get);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_configure);
+ SKELDEV_TEST_RUN(test_rawdev_configure, NULL,
+ test_rawdev_queue_default_conf_get);
+ SKELDEV_TEST_RUN(test_rawdev_configure, NULL, test_rawdev_queue_setup);
+ SKELDEV_TEST_RUN(test_rawdev_queue_setup, NULL,
+ test_rawdev_queue_release);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_attr_set_get);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_start_stop);
+ SKELDEV_TEST_RUN(test_rawdev_queue_setup, NULL, test_rawdev_enqdeq);
+
+ testsuite_teardown();
+
+ SKELDEV_TEST_INFO("Total tests : %d", total);
+ SKELDEV_TEST_INFO("Passed : %d", passed);
+ SKELDEV_TEST_INFO("Failed : %d", failed);
+ SKELDEV_TEST_INFO("Not supported : %d", unsupported);
+
+ if (failed)
+ return -1;
+
+ return 0;
+}