Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Makefile | 2
-rw-r--r-- drivers/baseband/null/bbdev_null.c | 4
-rw-r--r-- drivers/baseband/turbo_sw/bbdev_turbo_software.c | 12
-rw-r--r-- drivers/bus/Makefile | 1
-rw-r--r-- drivers/bus/dpaa/base/fman/fman_hw.c | 62
-rw-r--r-- drivers/bus/dpaa/base/fman/netcfg_layer.c | 5
-rw-r--r-- drivers/bus/dpaa/base/fman/of.c | 44
-rw-r--r-- drivers/bus/dpaa/base/qbman/bman_driver.c | 4
-rw-r--r-- drivers/bus/dpaa/base/qbman/qman.c | 21
-rw-r--r-- drivers/bus/dpaa/base/qbman/qman_driver.c | 4
-rw-r--r-- drivers/bus/dpaa/base/qbman/qman_priv.h | 1
-rw-r--r-- drivers/bus/dpaa/dpaa_bus.c | 20
-rw-r--r-- drivers/bus/dpaa/include/compat.h | 6
-rw-r--r-- drivers/bus/dpaa/include/fsl_fman.h | 6
-rw-r--r-- drivers/bus/dpaa/include/fsl_qman.h | 3
-rw-r--r-- drivers/bus/dpaa/include/of.h | 2
-rw-r--r-- drivers/bus/dpaa/rte_bus_dpaa_version.map | 10
-rw-r--r-- drivers/bus/dpaa/rte_dpaa_bus.h | 3
-rw-r--r-- drivers/bus/fslmc/fslmc_bus.c | 4
-rw-r--r-- drivers/bus/fslmc/fslmc_logs.h | 2
-rw-r--r-- drivers/bus/fslmc/qbman/qbman_portal.c | 3
-rw-r--r-- drivers/bus/fslmc/qbman/qbman_portal.h | 1
-rw-r--r-- drivers/bus/fslmc/rte_fslmc.h | 6
-rw-r--r-- drivers/bus/ifpga/rte_bus_ifpga.h | 3
-rw-r--r-- drivers/bus/meson.build | 2
-rw-r--r-- drivers/bus/pci/linux/pci.c | 1
-rw-r--r-- drivers/bus/pci/linux/pci_uio.c | 47
-rw-r--r-- drivers/bus/pci/linux/pci_vfio.c | 118
-rw-r--r-- drivers/bus/pci/pci_common.c | 97
-rw-r--r-- drivers/bus/pci/private.h | 50
-rw-r--r-- drivers/bus/pci/rte_bus_pci.h | 5
-rw-r--r-- drivers/bus/vdev/rte_bus_vdev.h | 3
-rw-r--r-- drivers/bus/vmbus/Makefile | 36
-rw-r--r-- drivers/bus/vmbus/linux/Makefile | 3
-rw-r--r-- drivers/bus/vmbus/linux/vmbus_bus.c | 355
-rw-r--r-- drivers/bus/vmbus/linux/vmbus_uio.c | 398
-rw-r--r-- drivers/bus/vmbus/meson.build | 18
-rw-r--r-- drivers/bus/vmbus/private.h | 132
-rw-r--r-- drivers/bus/vmbus/rte_bus_vmbus.h | 407
-rw-r--r-- drivers/bus/vmbus/rte_bus_vmbus_version.map | 29
-rw-r--r-- drivers/bus/vmbus/rte_vmbus_reg.h | 344
-rw-r--r-- drivers/bus/vmbus/vmbus_bufring.c | 244
-rw-r--r-- drivers/bus/vmbus/vmbus_channel.c | 405
-rw-r--r-- drivers/bus/vmbus/vmbus_common.c | 286
-rw-r--r-- drivers/bus/vmbus/vmbus_common_uio.c | 232
-rw-r--r-- drivers/common/meson.build | 2
-rw-r--r-- drivers/common/octeontx/octeontx_mbox.c | 4
-rw-r--r-- drivers/common/qat/Makefile | 66
-rw-r--r-- drivers/common/qat/meson.build | 14
-rw-r--r-- drivers/common/qat/qat_adf/adf_transport_access_macros.h (renamed from drivers/crypto/qat/qat_adf/adf_transport_access_macros.h) | 9
-rw-r--r-- drivers/common/qat/qat_adf/icp_qat_fw.h (renamed from drivers/crypto/qat/qat_adf/icp_qat_fw.h) | 69
-rw-r--r-- drivers/common/qat/qat_adf/icp_qat_fw_comp.h | 482
-rw-r--r-- drivers/common/qat/qat_adf/icp_qat_fw_la.h (renamed from drivers/crypto/qat/qat_adf/icp_qat_fw_la.h) | 0
-rw-r--r-- drivers/common/qat/qat_adf/icp_qat_hw.h (renamed from drivers/crypto/qat/qat_adf/icp_qat_hw.h) | 130
-rw-r--r-- drivers/common/qat/qat_common.c | 123
-rw-r--r-- drivers/common/qat/qat_common.h | 79
-rw-r--r-- drivers/common/qat/qat_device.c | 279
-rw-r--r-- drivers/common/qat/qat_device.h | 102
-rw-r--r-- drivers/common/qat/qat_logs.c | 38
-rw-r--r-- drivers/common/qat/qat_logs.h | 34
-rw-r--r-- drivers/common/qat/qat_qp.c | 642
-rw-r--r-- drivers/common/qat/qat_qp.h | 111
-rw-r--r-- drivers/compress/Makefile | 2
-rw-r--r-- drivers/compress/isal/isal_compress_pmd.c | 351
-rw-r--r-- drivers/compress/isal/isal_compress_pmd_ops.c | 14
-rw-r--r-- drivers/compress/meson.build | 2
-rw-r--r-- drivers/compress/octeontx/Makefile | 30
-rw-r--r-- drivers/compress/octeontx/include/zip_regs.h | 711
-rw-r--r-- drivers/compress/octeontx/meson.build | 9
-rw-r--r-- drivers/compress/octeontx/otx_zip.c | 180
-rw-r--r-- drivers/compress/octeontx/otx_zip.h | 277
-rw-r--r-- drivers/compress/octeontx/otx_zip_pmd.c | 658
-rw-r--r-- drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map | 3
-rw-r--r-- drivers/compress/qat/meson.build | 18
-rw-r--r-- drivers/compress/qat/qat_comp.c | 393
-rw-r--r-- drivers/compress/qat/qat_comp.h | 65
-rw-r--r-- drivers/compress/qat/qat_comp_pmd.c | 429
-rw-r--r-- drivers/compress/qat/qat_comp_pmd.h | 39
-rw-r--r-- drivers/compress/qat/rte_pmd_qat_version.map | 3
-rw-r--r-- drivers/compress/zlib/Makefile | 29
-rw-r--r-- drivers/compress/zlib/meson.build | 14
-rw-r--r-- drivers/compress/zlib/rte_pmd_zlib_version.map | 3
-rw-r--r-- drivers/compress/zlib/zlib_pmd.c | 436
-rw-r--r-- drivers/compress/zlib/zlib_pmd_ops.c | 307
-rw-r--r-- drivers/compress/zlib/zlib_pmd_private.h | 71
-rw-r--r-- drivers/crypto/Makefile | 1
-rw-r--r-- drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 47
-rw-r--r-- drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c | 54
-rw-r--r-- drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h | 36
-rw-r--r-- drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 135
-rw-r--r-- drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 93
-rw-r--r-- drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 58
-rw-r--r-- drivers/crypto/armv8/rte_armv8_pmd.c | 11
-rw-r--r-- drivers/crypto/armv8/rte_armv8_pmd_ops.c | 39
-rw-r--r-- drivers/crypto/armv8/rte_armv8_pmd_private.h | 2
-rw-r--r-- drivers/crypto/ccp/ccp_crypto.c | 28
-rw-r--r-- drivers/crypto/ccp/ccp_pmd_ops.c | 37
-rw-r--r-- drivers/crypto/ccp/ccp_pmd_private.h | 1
-rw-r--r-- drivers/crypto/ccp/rte_ccp_pmd.c | 20
-rw-r--r-- drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 56
-rw-r--r-- drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h | 2
-rw-r--r-- drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 2
-rw-r--r-- drivers/crypto/dpaa_sec/dpaa_sec.c | 104
-rw-r--r-- drivers/crypto/dpaa_sec/dpaa_sec.h | 1
-rw-r--r-- drivers/crypto/dpaa_sec/dpaa_sec_log.h | 2
-rw-r--r-- drivers/crypto/kasumi/rte_kasumi_pmd.c | 44
-rw-r--r-- drivers/crypto/kasumi/rte_kasumi_pmd_ops.c | 53
-rw-r--r-- drivers/crypto/kasumi/rte_kasumi_pmd_private.h | 28
-rw-r--r-- drivers/crypto/mvsam/rte_mrvl_pmd.c | 134
-rw-r--r-- drivers/crypto/mvsam/rte_mrvl_pmd_ops.c | 46
-rw-r--r-- drivers/crypto/null/null_crypto_pmd.c | 26
-rw-r--r-- drivers/crypto/null/null_crypto_pmd_ops.c | 68
-rw-r--r-- drivers/crypto/null/null_crypto_pmd_private.h | 24
-rw-r--r-- drivers/crypto/openssl/compat.h | 108
-rw-r--r-- drivers/crypto/openssl/rte_openssl_pmd.c | 521
-rw-r--r-- drivers/crypto/openssl/rte_openssl_pmd_ops.c | 581
-rw-r--r-- drivers/crypto/openssl/rte_openssl_pmd_private.h | 55
-rw-r--r-- drivers/crypto/qat/Makefile | 35
-rw-r--r-- drivers/crypto/qat/README | 7
-rw-r--r-- drivers/crypto/qat/meson.build | 22
-rw-r--r-- drivers/crypto/qat/qat_adf/qat_algs.h | 126
-rw-r--r-- drivers/crypto/qat/qat_crypto.c | 1696
-rw-r--r-- drivers/crypto/qat/qat_crypto.h | 150
-rw-r--r-- drivers/crypto/qat/qat_logs.h | 49
-rw-r--r-- drivers/crypto/qat/qat_qp.c | 451
-rw-r--r-- drivers/crypto/qat/qat_sym.c | 569
-rw-r--r-- drivers/crypto/qat/qat_sym.h | 174
-rw-r--r-- drivers/crypto/qat/qat_sym_capabilities.h (renamed from drivers/crypto/qat/qat_crypto_capabilities.h) | 8
-rw-r--r-- drivers/crypto/qat/qat_sym_pmd.c | 331
-rw-r--r-- drivers/crypto/qat/qat_sym_pmd.h | 41
-rw-r--r-- drivers/crypto/qat/qat_sym_session.c (renamed from drivers/crypto/qat/qat_adf/qat_algs_build_desc.c) | 844
-rw-r--r-- drivers/crypto/qat/qat_sym_session.h | 145
-rw-r--r-- drivers/crypto/qat/rte_pmd_qat_version.map | 3
-rw-r--r-- drivers/crypto/qat/rte_qat_cryptodev.c | 180
-rw-r--r-- drivers/crypto/scheduler/rte_cryptodev_scheduler.c | 93
-rw-r--r-- drivers/crypto/scheduler/rte_cryptodev_scheduler.h | 1
-rw-r--r-- drivers/crypto/scheduler/scheduler_failover.c | 4
-rw-r--r-- drivers/crypto/scheduler/scheduler_multicore.c | 11
-rw-r--r-- drivers/crypto/scheduler/scheduler_pkt_size_distr.c | 14
-rw-r--r-- drivers/crypto/scheduler/scheduler_pmd.c | 118
-rw-r--r-- drivers/crypto/scheduler/scheduler_pmd_ops.c | 92
-rw-r--r-- drivers/crypto/scheduler/scheduler_pmd_private.h | 24
-rw-r--r-- drivers/crypto/scheduler/scheduler_roundrobin.c | 2
-rw-r--r-- drivers/crypto/snow3g/rte_snow3g_pmd.c | 36
-rw-r--r-- drivers/crypto/snow3g/rte_snow3g_pmd_ops.c | 51
-rw-r--r-- drivers/crypto/snow3g/rte_snow3g_pmd_private.h | 30
-rw-r--r-- drivers/crypto/virtio/virtio_cryptodev.c | 37
-rw-r--r-- drivers/crypto/virtio/virtio_cryptodev.h | 3
-rw-r--r-- drivers/crypto/virtio/virtio_rxtx.c | 16
-rw-r--r-- drivers/crypto/zuc/rte_zuc_pmd.c | 37
-rw-r--r-- drivers/crypto/zuc/rte_zuc_pmd_ops.c | 52
-rw-r--r-- drivers/crypto/zuc/rte_zuc_pmd_private.h | 29
-rw-r--r-- drivers/event/dpaa2/dpaa2_eventdev.c | 4
-rw-r--r-- drivers/event/dpaa2/dpaa2_eventdev_logs.h | 2
-rw-r--r-- drivers/event/octeontx/ssovf_evdev.c | 18
-rw-r--r-- drivers/event/octeontx/ssovf_worker.c | 17
-rw-r--r-- drivers/event/octeontx/ssovf_worker.h | 2
-rw-r--r-- drivers/event/octeontx/timvf_evdev.c | 6
-rw-r--r-- drivers/event/opdl/opdl_evdev.c | 5
-rw-r--r-- drivers/event/sw/sw_evdev.c | 118
-rw-r--r-- drivers/event/sw/sw_evdev_selftest.c | 81
-rw-r--r-- drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 4
-rw-r--r-- drivers/mempool/octeontx/octeontx_fpavf.c | 49
-rw-r--r-- drivers/mempool/octeontx/octeontx_fpavf.h | 9
-rw-r--r-- drivers/meson.build | 10
-rw-r--r-- drivers/net/Makefile | 3
-rw-r--r-- drivers/net/af_packet/rte_eth_af_packet.c | 6
-rw-r--r-- drivers/net/ark/Makefile | 32
-rw-r--r-- drivers/net/ark/ark_ddm.c | 33
-rw-r--r-- drivers/net/ark/ark_ddm.h | 33
-rw-r--r-- drivers/net/ark/ark_ethdev.c | 33
-rw-r--r-- drivers/net/ark/ark_ethdev_rx.c | 33
-rw-r--r-- drivers/net/ark/ark_ethdev_rx.h | 33
-rw-r--r-- drivers/net/ark/ark_ethdev_tx.c | 33
-rw-r--r-- drivers/net/ark/ark_ethdev_tx.h | 33
-rw-r--r-- drivers/net/ark/ark_ext.h | 33
-rw-r--r-- drivers/net/ark/ark_global.h | 33
-rw-r--r-- drivers/net/ark/ark_logs.h | 33
-rw-r--r-- drivers/net/ark/ark_mpu.c | 33
-rw-r--r-- drivers/net/ark/ark_mpu.h | 33
-rw-r--r-- drivers/net/ark/ark_pktchkr.c | 33
-rw-r--r-- drivers/net/ark/ark_pktchkr.h | 33
-rw-r--r-- drivers/net/ark/ark_pktdir.c | 33
-rw-r--r-- drivers/net/ark/ark_pktdir.h | 33
-rw-r--r-- drivers/net/ark/ark_pktgen.c | 33
-rw-r--r-- drivers/net/ark/ark_pktgen.h | 33
-rw-r--r-- drivers/net/ark/ark_rqp.c | 33
-rw-r--r-- drivers/net/ark/ark_rqp.h | 33
-rw-r--r-- drivers/net/ark/ark_udm.c | 33
-rw-r--r-- drivers/net/ark/ark_udm.h | 33
-rw-r--r-- drivers/net/ark/meson.build | 13
-rw-r--r-- drivers/net/avf/avf_ethdev.c | 22
-rw-r--r-- drivers/net/avp/avp_ethdev.c | 7
-rw-r--r-- drivers/net/avp/meson.build | 5
-rw-r--r-- drivers/net/axgbe/axgbe_ethdev.c | 10
-rw-r--r-- drivers/net/axgbe/axgbe_rxtx.c | 10
-rw-r--r-- drivers/net/bnx2x/LICENSE.bnx2x_pmd | 3
-rw-r--r-- drivers/net/bnx2x/Makefile | 9
-rw-r--r-- drivers/net/bnx2x/bnx2x.c | 28
-rw-r--r-- drivers/net/bnx2x/bnx2x.h | 5
-rw-r--r-- drivers/net/bnx2x/bnx2x_ethdev.c | 114
-rw-r--r-- drivers/net/bnx2x/bnx2x_ethdev.h | 8
-rw-r--r-- drivers/net/bnx2x/bnx2x_logs.h | 5
-rw-r--r-- drivers/net/bnx2x/bnx2x_rxtx.c | 5
-rw-r--r-- drivers/net/bnx2x/bnx2x_rxtx.h | 5
-rw-r--r-- drivers/net/bnx2x/bnx2x_stats.c | 6
-rw-r--r-- drivers/net/bnx2x/bnx2x_stats.h | 6
-rw-r--r-- drivers/net/bnx2x/bnx2x_vfpf.c | 5
-rw-r--r-- drivers/net/bnx2x/bnx2x_vfpf.h | 5
-rw-r--r-- drivers/net/bnx2x/ecore_fw_defs.h | 6
-rw-r--r-- drivers/net/bnx2x/ecore_hsi.h | 6
-rw-r--r-- drivers/net/bnx2x/ecore_init.h | 6
-rw-r--r-- drivers/net/bnx2x/ecore_init_ops.h | 6
-rw-r--r-- drivers/net/bnx2x/ecore_mfw_req.h | 6
-rw-r--r-- drivers/net/bnx2x/ecore_reg.h | 6
-rw-r--r-- drivers/net/bnx2x/ecore_sp.c | 6
-rw-r--r-- drivers/net/bnx2x/ecore_sp.h | 6
-rw-r--r-- drivers/net/bnx2x/elink.c | 6
-rw-r--r-- drivers/net/bnx2x/elink.h | 6
-rw-r--r-- drivers/net/bnx2x/meson.build | 14
-rw-r--r-- drivers/net/bnxt/Makefile | 2
-rw-r--r-- drivers/net/bnxt/bnxt.h | 32
-rw-r--r-- drivers/net/bnxt/bnxt_cpr.h | 12
-rw-r--r-- drivers/net/bnxt/bnxt_ethdev.c | 127
-rw-r--r-- drivers/net/bnxt/bnxt_filter.c | 1090
-rw-r--r-- drivers/net/bnxt/bnxt_filter.h | 1
-rw-r--r-- drivers/net/bnxt/bnxt_flow.c | 1171
-rw-r--r-- drivers/net/bnxt/bnxt_hwrm.c | 261
-rw-r--r-- drivers/net/bnxt/bnxt_hwrm.h | 9
-rw-r--r-- drivers/net/bnxt/bnxt_ring.c | 115
-rw-r--r-- drivers/net/bnxt/bnxt_ring.h | 1
-rw-r--r-- drivers/net/bnxt/bnxt_rxq.c | 58
-rw-r--r-- drivers/net/bnxt/bnxt_rxq.h | 4
-rw-r--r-- drivers/net/bnxt/bnxt_rxr.c | 26
-rw-r--r-- drivers/net/bnxt/bnxt_rxr.h | 2
-rw-r--r-- drivers/net/bnxt/bnxt_stats.c | 3
-rw-r--r-- drivers/net/bnxt/bnxt_txq.h | 1
-rw-r--r-- drivers/net/bnxt/bnxt_txr.c | 156
-rw-r--r-- drivers/net/bnxt/bnxt_txr.h | 10
-rw-r--r-- drivers/net/bnxt/bnxt_util.c | 18
-rw-r--r-- drivers/net/bnxt/bnxt_util.h | 11
-rw-r--r-- drivers/net/bnxt/bnxt_vnic.c | 5
-rw-r--r-- drivers/net/bnxt/bnxt_vnic.h | 6
-rw-r--r-- drivers/net/bnxt/hsi_struct_def_dpdk.h | 113
-rw-r--r-- drivers/net/bnxt/meson.build | 20
-rw-r--r-- drivers/net/bonding/rte_eth_bond_api.c | 39
-rw-r--r-- drivers/net/bonding/rte_eth_bond_pmd.c | 173
-rw-r--r-- drivers/net/bonding/rte_eth_bond_private.h | 8
-rw-r--r-- drivers/net/cxgbe/Makefile | 3
-rw-r--r-- drivers/net/cxgbe/base/adapter.h | 106
-rw-r--r-- drivers/net/cxgbe/base/common.h | 37
-rw-r--r-- drivers/net/cxgbe/base/t4_hw.c | 378
-rw-r--r-- drivers/net/cxgbe/base/t4_hw.h | 4
-rw-r--r-- drivers/net/cxgbe/base/t4_msg.h | 210
-rw-r--r-- drivers/net/cxgbe/base/t4_regs.h | 31
-rw-r--r-- drivers/net/cxgbe/base/t4_tcb.h | 26
-rw-r--r-- drivers/net/cxgbe/base/t4fw_interface.h | 266
-rw-r--r-- drivers/net/cxgbe/base/t4vf_hw.c | 6
-rw-r--r-- drivers/net/cxgbe/clip_tbl.c | 193
-rw-r--r-- drivers/net/cxgbe/clip_tbl.h | 31
-rw-r--r-- drivers/net/cxgbe/cxgbe.h | 24
-rw-r--r-- drivers/net/cxgbe/cxgbe_compat.h | 21
-rw-r--r-- drivers/net/cxgbe/cxgbe_ethdev.c | 96
-rw-r--r-- drivers/net/cxgbe/cxgbe_filter.c | 1252
-rw-r--r-- drivers/net/cxgbe/cxgbe_filter.h | 235
-rw-r--r-- drivers/net/cxgbe/cxgbe_flow.c | 845
-rw-r--r-- drivers/net/cxgbe/cxgbe_flow.h | 42
-rw-r--r-- drivers/net/cxgbe/cxgbe_main.c | 450
-rw-r--r-- drivers/net/cxgbe/cxgbe_ofld.h | 89
-rw-r--r-- drivers/net/cxgbe/cxgbe_pfvf.h | 2
-rw-r--r-- drivers/net/cxgbe/cxgbevf_ethdev.c | 3
-rw-r--r-- drivers/net/cxgbe/cxgbevf_main.c | 20
-rw-r--r-- drivers/net/cxgbe/meson.build | 14
-rw-r--r-- drivers/net/cxgbe/sge.c | 207
-rw-r--r-- drivers/net/dpaa/dpaa_ethdev.c | 101
-rw-r--r-- drivers/net/dpaa/dpaa_ethdev.h | 8
-rw-r--r-- drivers/net/dpaa/dpaa_rxtx.c | 20
-rw-r--r-- drivers/net/dpaa/rte_pmd_dpaa.h | 5
-rw-r--r-- drivers/net/dpaa/rte_pmd_dpaa_version.map | 4
-rw-r--r-- drivers/net/dpaa2/dpaa2_ethdev.c | 4
-rw-r--r-- drivers/net/dpaa2/dpaa2_pmd_logs.h | 2
-rw-r--r-- drivers/net/dpaa2/dpaa2_rxtx.c | 16
-rw-r--r-- drivers/net/dpaa2/mc/dpni.c | 2
-rw-r--r-- drivers/net/e1000/em_ethdev.c | 4
-rw-r--r-- drivers/net/e1000/em_rxtx.c | 20
-rw-r--r-- drivers/net/e1000/igb_ethdev.c | 11
-rw-r--r-- drivers/net/e1000/igb_rxtx.c | 27
-rw-r--r-- drivers/net/ena/Makefile | 1
-rw-r--r-- drivers/net/ena/base/ena_com.c | 711
-rw-r--r-- drivers/net/ena/base/ena_com.h | 112
-rw-r--r-- drivers/net/ena/base/ena_defs/ena_admin_defs.h | 1164
-rw-r--r-- drivers/net/ena/base/ena_defs/ena_common_defs.h | 8
-rw-r--r-- drivers/net/ena/base/ena_defs/ena_eth_io_defs.h | 758
-rw-r--r-- drivers/net/ena/base/ena_defs/ena_gen_info.h | 4
-rw-r--r-- drivers/net/ena/base/ena_defs/ena_includes.h | 2
-rw-r--r-- drivers/net/ena/base/ena_defs/ena_regs_defs.h | 36
-rw-r--r-- drivers/net/ena/base/ena_eth_com.c | 78
-rw-r--r-- drivers/net/ena/base/ena_eth_com.h | 10
-rw-r--r-- drivers/net/ena/base/ena_plat.h | 4
-rw-r--r-- drivers/net/ena/base/ena_plat_dpdk.h | 74
-rw-r--r-- drivers/net/ena/ena_ethdev.c | 727
-rw-r--r-- drivers/net/ena/ena_ethdev.h | 32
-rw-r--r-- drivers/net/ena/meson.build | 11
-rw-r--r-- drivers/net/enic/base/cq_desc.h | 1
-rw-r--r-- drivers/net/enic/base/vnic_dev.c | 16
-rw-r--r-- drivers/net/enic/base/vnic_dev.h | 4
-rw-r--r-- drivers/net/enic/base/vnic_devcmd.h | 23
-rw-r--r-- drivers/net/enic/base/vnic_enet.h | 5
-rw-r--r-- drivers/net/enic/base/vnic_nic.h | 4
-rw-r--r-- drivers/net/enic/base/vnic_rq.h | 2
-rw-r--r-- drivers/net/enic/base/vnic_wq.c | 9
-rw-r--r-- drivers/net/enic/base/vnic_wq.h | 12
-rw-r--r-- drivers/net/enic/enic.h | 12
-rw-r--r-- drivers/net/enic/enic_compat.h | 5
-rw-r--r-- drivers/net/enic/enic_ethdev.c | 172
-rw-r--r-- drivers/net/enic/enic_main.c | 145
-rw-r--r-- drivers/net/enic/enic_res.c | 13
-rw-r--r-- drivers/net/enic/enic_res.h | 16
-rw-r--r-- drivers/net/enic/enic_rxtx.c | 303
-rw-r--r-- drivers/net/failsafe/failsafe.c | 5
-rw-r--r-- drivers/net/failsafe/failsafe_args.c | 2
-rw-r--r-- drivers/net/failsafe/failsafe_eal.c | 2
-rw-r--r-- drivers/net/failsafe/meson.build | 23
-rw-r--r-- drivers/net/fm10k/fm10k.h | 10
-rw-r--r-- drivers/net/fm10k/fm10k_ethdev.c | 131
-rw-r--r-- drivers/net/fm10k/fm10k_rxtx.c | 78
-rw-r--r-- drivers/net/i40e/i40e_ethdev.c | 362
-rw-r--r-- drivers/net/i40e/i40e_ethdev.h | 78
-rw-r--r-- drivers/net/i40e/i40e_ethdev_vf.c | 155
-rw-r--r-- drivers/net/i40e/i40e_fdir.c | 1
-rw-r--r-- drivers/net/i40e/i40e_flow.c | 1
-rw-r--r-- drivers/net/i40e/i40e_rxtx.c | 151
-rw-r--r-- drivers/net/i40e/i40e_rxtx.h | 2
-rw-r--r-- drivers/net/i40e/i40e_rxtx_vec_avx2.c | 2
-rw-r--r-- drivers/net/i40e/rte_pmd_i40e.c | 4
-rw-r--r-- drivers/net/ifc/Makefile | 8
-rw-r--r-- drivers/net/ifc/ifcvf_vdpa.c | 7
-rw-r--r-- drivers/net/ifc/meson.build | 8
-rw-r--r-- drivers/net/ifc/rte_pmd_ifc_version.map (renamed from drivers/net/ifc/rte_ifcvf_version.map) | 0
-rw-r--r-- drivers/net/ixgbe/ixgbe_ethdev.c | 31
-rw-r--r-- drivers/net/ixgbe/ixgbe_ethdev.h | 5
-rw-r--r-- drivers/net/ixgbe/ixgbe_fdir.c | 30
-rw-r--r-- drivers/net/ixgbe/ixgbe_flow.c | 12
-rw-r--r-- drivers/net/ixgbe/ixgbe_ipsec.c | 2
-rw-r--r-- drivers/net/ixgbe/ixgbe_pf.c | 14
-rw-r--r-- drivers/net/ixgbe/ixgbe_rxtx.c | 166
-rw-r--r-- drivers/net/kni/meson.build | 8
-rw-r--r-- drivers/net/kni/rte_eth_kni.c | 6
-rw-r--r-- drivers/net/liquidio/Makefile | 2
-rw-r--r-- drivers/net/liquidio/lio_ethdev.c | 4
-rw-r--r-- drivers/net/liquidio/meson.build | 8
-rw-r--r-- drivers/net/liquidio/rte_pmd_liquidio_version.map (renamed from drivers/net/liquidio/rte_pmd_lio_version.map) | 0
-rw-r--r-- drivers/net/meson.build | 31
-rw-r--r-- drivers/net/mlx4/Makefile | 7
-rw-r--r-- drivers/net/mlx4/mlx4.c | 53
-rw-r--r-- drivers/net/mlx4/mlx4.h | 6
-rw-r--r-- drivers/net/mlx4/mlx4_prm.h | 16
-rw-r--r-- drivers/net/mlx4/mlx4_rxq.c | 30
-rw-r--r-- drivers/net/mlx4/mlx4_rxtx.c | 522
-rw-r--r-- drivers/net/mlx4/mlx4_rxtx.h | 2
-rw-r--r-- drivers/net/mlx4/mlx4_txq.c | 8
-rw-r--r-- drivers/net/mlx5/Makefile | 238
-rw-r--r-- drivers/net/mlx5/mlx5.c | 1206
-rw-r--r-- drivers/net/mlx5/mlx5.h | 84
-rw-r--r-- drivers/net/mlx5/mlx5_defs.h | 32
-rw-r--r-- drivers/net/mlx5/mlx5_ethdev.c | 247
-rw-r--r-- drivers/net/mlx5/mlx5_flow.c | 4980
-rw-r--r-- drivers/net/mlx5/mlx5_glue.c | 4
-rw-r--r-- drivers/net/mlx5/mlx5_mac.c | 2
-rw-r--r-- drivers/net/mlx5/mlx5_mr.c | 119
-rw-r--r-- drivers/net/mlx5/mlx5_mr.h | 5
-rw-r--r-- drivers/net/mlx5/mlx5_nl.c | 317
-rw-r--r-- drivers/net/mlx5/mlx5_nl_flow.c | 1248
-rw-r--r-- drivers/net/mlx5/mlx5_prm.h | 12
-rw-r--r-- drivers/net/mlx5/mlx5_rss.c | 7
-rw-r--r-- drivers/net/mlx5/mlx5_rxmode.c | 26
-rw-r--r-- drivers/net/mlx5/mlx5_rxq.c | 392
-rw-r--r-- drivers/net/mlx5/mlx5_rxtx.c | 112
-rw-r--r-- drivers/net/mlx5/mlx5_rxtx.h | 93
-rw-r--r-- drivers/net/mlx5/mlx5_rxtx_vec.h | 4
-rw-r--r-- drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 16
-rw-r--r-- drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 16
-rw-r--r-- drivers/net/mlx5/mlx5_socket.c | 6
-rw-r--r-- drivers/net/mlx5/mlx5_stats.c | 6
-rw-r--r-- drivers/net/mlx5/mlx5_trigger.c | 45
-rw-r--r-- drivers/net/mlx5/mlx5_txq.c | 46
-rw-r--r-- drivers/net/mvpp2/mrvl_ethdev.c | 154
-rw-r--r-- drivers/net/mvpp2/mrvl_ethdev.h | 8
-rw-r--r-- drivers/net/mvpp2/mrvl_flow.c | 32
-rw-r--r-- drivers/net/mvpp2/mrvl_qos.c | 42
-rw-r--r-- drivers/net/netvsc/Makefile | 23
-rw-r--r-- drivers/net/netvsc/hn_ethdev.c | 761
-rw-r--r-- drivers/net/netvsc/hn_logs.h | 36
-rw-r--r-- drivers/net/netvsc/hn_nvs.c | 546
-rw-r--r-- drivers/net/netvsc/hn_nvs.h | 229
-rw-r--r-- drivers/net/netvsc/hn_rndis.c | 1099
-rw-r--r-- drivers/net/netvsc/hn_rndis.h | 32
-rw-r--r-- drivers/net/netvsc/hn_rxtx.c | 1334
-rw-r--r-- drivers/net/netvsc/hn_var.h | 158
-rw-r--r-- drivers/net/netvsc/meson.build | 10
-rw-r--r-- drivers/net/netvsc/ndis.h | 378
-rw-r--r-- drivers/net/netvsc/rndis.h | 414
-rw-r--r-- drivers/net/netvsc/rte_pmd_netvsc_version.map | 5
-rw-r--r-- drivers/net/nfp/meson.build | 16
-rw-r--r-- drivers/net/nfp/nfp_net.c | 33
-rw-r--r-- drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h | 1
-rw-r--r-- drivers/net/nfp/nfpcore/nfp_cpp.h | 6
-rw-r--r-- drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c | 184
-rw-r--r-- drivers/net/nfp/nfpcore/nfp_cppcore.c | 9
-rw-r--r-- drivers/net/null/rte_eth_null.c | 6
-rw-r--r-- drivers/net/octeontx/octeontx_ethdev.c | 24
-rw-r--r-- drivers/net/octeontx/octeontx_rxtx.c | 2
-rw-r--r-- drivers/net/pcap/rte_eth_pcap.c | 205
-rw-r--r-- drivers/net/qede/LICENSE.qede_pmd | 3
-rw-r--r-- drivers/net/qede/Makefile | 3
-rw-r--r-- drivers/net/qede/base/bcm_osal.c | 9
-rw-r--r-- drivers/net/qede/base/bcm_osal.h | 4
-rw-r--r-- drivers/net/qede/base/common_hsi.h | 4
-rw-r--r-- drivers/net/qede/base/ecore.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_attn_values.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_chain.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_cxt.c | 4
-rw-r--r-- drivers/net/qede/base/ecore_cxt.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_cxt_api.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_dcbx.c | 4
-rw-r--r-- drivers/net/qede/base/ecore_dcbx.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_dcbx_api.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_dev.c | 14
-rw-r--r-- drivers/net/qede/base/ecore_dev_api.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_gtt_reg_addr.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_gtt_values.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_hsi_common.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_hsi_debug_tools.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_hsi_eth.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_hsi_init_func.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_hsi_init_tool.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_hw.c | 4
-rw-r--r-- drivers/net/qede/base/ecore_hw.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_hw_defs.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_init_fw_funcs.c | 4
-rw-r--r-- drivers/net/qede/base/ecore_init_fw_funcs.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_init_ops.c | 4
-rw-r--r-- drivers/net/qede/base/ecore_init_ops.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_int.c | 18
-rw-r--r-- drivers/net/qede/base/ecore_int.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_int_api.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_iov_api.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_iro.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_iro_values.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_l2.c | 4
-rw-r--r-- drivers/net/qede/base/ecore_l2.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_l2_api.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_mcp.c | 4
-rw-r--r-- drivers/net/qede/base/ecore_mcp.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_mcp_api.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_mng_tlv.c | 4
-rw-r--r-- drivers/net/qede/base/ecore_proto_if.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_rt_defs.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_sp_api.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_sp_commands.c | 4
-rw-r--r-- drivers/net/qede/base/ecore_sp_commands.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_spq.c | 4
-rw-r--r-- drivers/net/qede/base/ecore_spq.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_sriov.c | 48
-rw-r--r-- drivers/net/qede/base/ecore_sriov.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_status.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_utils.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_vf.c | 37
-rw-r--r-- drivers/net/qede/base/ecore_vf.h | 13
-rw-r--r-- drivers/net/qede/base/ecore_vf_api.h | 4
-rw-r--r-- drivers/net/qede/base/ecore_vfpf_if.h | 20
-rw-r--r-- drivers/net/qede/base/eth_common.h | 4
-rw-r--r-- drivers/net/qede/base/mcp_public.h | 4
-rw-r--r-- drivers/net/qede/base/nvm_cfg.h | 4
-rw-r--r-- drivers/net/qede/base/reg_addr.h | 4
-rw-r--r-- drivers/net/qede/qede_ethdev.c | 274
-rw-r--r-- drivers/net/qede/qede_ethdev.h | 7
-rw-r--r-- drivers/net/qede/qede_fdir.c | 7
-rw-r--r-- drivers/net/qede/qede_if.h | 4
-rw-r--r-- drivers/net/qede/qede_logs.h | 4
-rw-r--r-- drivers/net/qede/qede_main.c | 11
-rw-r--r-- drivers/net/qede/qede_rxtx.c | 27
-rw-r--r-- drivers/net/qede/qede_rxtx.h | 5
-rw-r--r-- drivers/net/ring/rte_eth_ring.c | 5
-rw-r--r-- drivers/net/sfc/meson.build | 2
-rw-r--r-- drivers/net/sfc/sfc_dp_rx.h | 1
-rw-r--r-- drivers/net/sfc/sfc_dp_tx.h | 2
-rw-r--r-- drivers/net/sfc/sfc_ef10_essb_rx.c | 73
-rw-r--r-- drivers/net/sfc/sfc_ef10_rx.c | 3
-rw-r--r-- drivers/net/sfc/sfc_ef10_rx_ev.h | 8
-rw-r--r-- drivers/net/sfc/sfc_ethdev.c | 15
-rw-r--r-- drivers/net/sfc/sfc_filter.c | 14
-rw-r--r-- drivers/net/sfc/sfc_filter.h | 10
-rw-r--r-- drivers/net/sfc/sfc_flow.c | 95
-rw-r--r-- drivers/net/sfc/sfc_rx.c | 49
-rw-r--r-- drivers/net/sfc/sfc_tx.c | 25
-rw-r--r-- drivers/net/sfc/sfc_tx.h | 1
-rw-r--r-- drivers/net/softnic/Makefile | 23
-rw-r--r-- drivers/net/softnic/conn.c | 332
-rw-r--r-- drivers/net/softnic/conn.h | 49
-rw-r--r-- drivers/net/softnic/firmware.cli | 21
-rw-r--r-- drivers/net/softnic/hash_func.h | 359
-rw-r--r-- drivers/net/softnic/hash_func_arm64.h | 261
-rw-r--r-- drivers/net/softnic/meson.build | 18
-rw-r--r-- drivers/net/softnic/parser.c | 703
-rw-r--r-- drivers/net/softnic/parser.h | 68
-rw-r--r-- drivers/net/softnic/rte_eth_softnic.c | 763
-rw-r--r-- drivers/net/softnic/rte_eth_softnic.h | 49
-rw-r--r-- drivers/net/softnic/rte_eth_softnic_action.c | 389
-rw-r--r-- drivers/net/softnic/rte_eth_softnic_cli.c | 5259
-rw-r--r-- drivers/net/softnic/rte_eth_softnic_internals.h | 812
-rw-r--r-- drivers/net/softnic/rte_eth_softnic_link.c | 98
-rw-r--r-- drivers/net/softnic/rte_eth_softnic_mempool.c | 103
-rw-r--r-- drivers/net/softnic/rte_eth_softnic_pipeline.c | 966
-rw-r--r-- drivers/net/softnic/rte_eth_softnic_swq.c | 114
-rw-r--r-- drivers/net/softnic/rte_eth_softnic_tap.c | 118
-rw-r--r-- drivers/net/softnic/rte_eth_softnic_thread.c | 2929
-rw-r--r-- drivers/net/softnic/rte_eth_softnic_tm.c | 322
-rw-r--r-- drivers/net/softnic/rte_pmd_softnic_version.map (renamed from drivers/net/softnic/rte_pmd_eth_softnic_version.map) | 6
-rw-r--r-- drivers/net/szedata2/meson.build | 7
-rw-r--r-- drivers/net/szedata2/rte_eth_szedata2.c | 7
-rw-r--r-- drivers/net/tap/Makefile | 2
-rw-r--r-- drivers/net/tap/rte_eth_tap.c | 396
-rw-r--r-- drivers/net/tap/rte_eth_tap.h | 3
-rw-r--r-- drivers/net/tap/tap_flow.c | 18
-rw-r--r-- drivers/net/thunderx/base/nicvf_hw.c | 13
-rw-r--r-- drivers/net/thunderx/base/nicvf_hw.h | 1
-rw-r--r-- drivers/net/thunderx/nicvf_ethdev.c | 163
-rw-r--r-- drivers/net/thunderx/nicvf_ethdev.h | 2
-rw-r--r-- drivers/net/thunderx/nicvf_rxtx.c | 144
-rw-r--r-- drivers/net/thunderx/nicvf_rxtx.h | 24
-rw-r--r-- drivers/net/thunderx/nicvf_struct.h | 29
-rw-r--r-- drivers/net/vhost/meson.build | 8
-rw-r--r-- drivers/net/vhost/rte_eth_vhost.c | 8
-rw-r--r-- drivers/net/virtio/virtio_ethdev.c | 78
-rw-r--r-- drivers/net/virtio/virtio_ethdev.h | 17
-rw-r--r-- drivers/net/virtio/virtio_pci.h | 12
-rw-r--r-- drivers/net/virtio/virtio_rxtx.c | 691
-rw-r--r-- drivers/net/virtio/virtio_rxtx_simple.c | 67
-rw-r--r-- drivers/net/virtio/virtio_rxtx_simple.h | 49
-rw-r--r-- drivers/net/virtio/virtio_user/virtio_user_dev.c | 30
-rw-r--r-- drivers/net/virtio/virtio_user/virtio_user_dev.h | 4
-rw-r--r-- drivers/net/virtio/virtio_user_ethdev.c | 49
-rw-r--r-- drivers/net/virtio/virtqueue.c | 8
-rw-r--r-- drivers/net/virtio/virtqueue.h | 2
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_ethdev.c | 8
-rw-r--r-- drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c | 5
-rw-r--r-- drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h | 2
-rw-r--r-- drivers/raw/dpaa2_cmdif/meson.build | 1
-rw-r--r-- drivers/raw/dpaa2_qdma/dpaa2_qdma.c | 5
-rw-r--r-- drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h | 2
-rw-r--r-- drivers/raw/dpaa2_qdma/meson.build | 1
-rw-r--r-- drivers/raw/ifpga_rawdev/ifpga_rawdev.c | 4
-rw-r--r-- drivers/raw/skeleton_rawdev/Makefile | 1
-rw-r--r-- drivers/raw/skeleton_rawdev/meson.build | 2
-rw-r--r-- drivers/raw/skeleton_rawdev/skeleton_rawdev.c | 18
-rw-r--r-- drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c | 13
557 files changed, 50823 insertions, 14622 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index c88638c8..75660765 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -13,6 +13,8 @@ DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += baseband
DEPDIRS-baseband := common bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
DEPDIRS-crypto := common bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += common/qat
+DEPDIRS-common/qat := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += compress
DEPDIRS-compress := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
diff --git a/drivers/baseband/null/bbdev_null.c b/drivers/baseband/null/bbdev_null.c
index 76fc9d7a..2f251510 100644
--- a/drivers/baseband/null/bbdev_null.c
+++ b/drivers/baseband/null/bbdev_null.c
@@ -348,9 +348,7 @@ RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
BBDEV_NULL_SOCKET_ID_ARG"=<int>");
RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, bbdev_null);
-RTE_INIT(null_bbdev_init_log);
-static void
-null_bbdev_init_log(void)
+RTE_INIT(null_bbdev_init_log)
{
bbdev_null_logtype = rte_log_register("pmd.bb.null");
if (bbdev_null_logtype >= 0)
diff --git a/drivers/baseband/turbo_sw/bbdev_turbo_software.c b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
index 05e95ed9..8ceb2769 100644
--- a/drivers/baseband/turbo_sw/bbdev_turbo_software.c
+++ b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
@@ -490,8 +490,8 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
return;
}
crc_req.data = in;
- crc_req.len = (k - 24) >> 3;
- /* Check if there is a room for CRC bits. If not use
+ crc_req.len = k - 24;
+ /* Check if there is a room for CRC bits if not use
* the temporary buffer.
*/
if (rte_pktmbuf_append(m_in, 3) == NULL) {
@@ -522,8 +522,8 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
return;
}
crc_req.data = in;
- crc_req.len = (k - 24) >> 3;
- /* Check if there is a room for CRC bits. If this is the last
+ crc_req.len = k - 24;
+ /* Check if there is a room for CRC bits if this is the last
* CB in TB. If not use temporary buffer.
*/
if ((c - r == 1) && (rte_pktmbuf_append(m_in, 3) == NULL)) {
@@ -1299,9 +1299,7 @@ RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
TURBO_SW_SOCKET_ID_ARG"=<int>");
RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, turbo_sw);
-RTE_INIT(null_bbdev_init_log);
-static void
-null_bbdev_init_log(void)
+RTE_INIT(turbo_sw_bbdev_init_log)
{
bbdev_turbo_sw_logtype = rte_log_register("pmd.bb.turbo_sw");
if (bbdev_turbo_sw_logtype >= 0)
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index ef7f2475..cea3b55e 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -10,5 +10,6 @@ endif
DIRS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga
DIRS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci
DIRS-$(CONFIG_RTE_LIBRTE_VDEV_BUS) += vdev
+DIRS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 0148b98e..4ebbc3d3 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -16,6 +16,9 @@
#include <fsl_fman_crc64.h>
#include <fsl_bman.h>
+#define FMAN_SP_SG_DISABLE 0x80000000
+#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
+
/* Instantiate the global variable that the inline CRC64 implementation (in
* <fsl_fman.h>) depends on.
*/
@@ -422,20 +425,16 @@ fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta)
int
fman_if_get_fdoff(struct fman_if *fm_if)
{
- u32 fmbm_ricp;
+ u32 fmbm_rebm;
int fdoff;
- int iceof_mask = 0x001f0000;
- int icsz_mask = 0x0000001f;
struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
assert(fman_ccsr_map_fd != -1);
- fmbm_ricp =
- in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp);
- /*iceof + icsz*/
- fdoff = ((fmbm_ricp & iceof_mask) >> 16) * 16 +
- (fmbm_ricp & icsz_mask) * 16;
+ fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
+
+ fdoff = (fmbm_rebm >> FMAN_SP_EXT_BUF_MARG_START_SHIFT) & 0x1ff;
return fdoff;
}
@@ -502,12 +501,16 @@ fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset)
{
struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
unsigned int *fmbm_rebm;
+ int val = 0;
+ int fmbm_mask = 0x01ff0000;
+
+ val = fd_offset << FMAN_SP_EXT_BUF_MARG_START_SHIFT;
assert(fman_ccsr_map_fd != -1);
fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
- out_be32(fmbm_rebm, in_be32(fmbm_rebm) | (fd_offset << 16));
+ out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
}
void
@@ -536,6 +539,47 @@ fman_if_get_maxfrm(struct fman_if *fm_if)
return (in_be32(reg_maxfrm) | 0x0000FFFF);
}
+/* MSB in fmbm_rebm register
+ * 0 - If BMI cannot store the frame in a single buffer it may select a buffer
+ * of smaller size and store the frame in scatter gather (S/G) buffers
+ * 1 - Scatter gather format is not enabled for frame storage. If BMI cannot
+ * store the frame in a single buffer, the frame is discarded.
+ */
+
+int
+fman_if_get_sg_enable(struct fman_if *fm_if)
+{
+ u32 fmbm_rebm;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
+
+ return (fmbm_rebm & FMAN_SP_SG_DISABLE) ? 0 : 1;
+}
+
+void
+fman_if_set_sg(struct fman_if *fm_if, int enable)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rebm;
+ int val;
+ int fmbm_mask = FMAN_SP_SG_DISABLE;
+
+ if (enable)
+ val = 0;
+ else
+ val = FMAN_SP_SG_DISABLE;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
+
+ out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
+}
+
void
fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia)
{
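Both fmbm_rebm hunks above follow the same read-modify-write discipline: clear the target field with its mask before OR-ing in the new value, so that neighbouring fields of the register (such as the S/G disable bit in the MSB) are preserved; the old fman_if_set_fdoff() OR-ed without masking and could leave stale bits behind. A self-contained sketch of the idiom, reusing the mask and shift from the patch:

#include <stdint.h>

#define EXT_BUF_MARG_MASK	0x01ff0000u /* fd offset field, bits 16..24 */
#define EXT_BUF_MARG_SHIFT	16

static inline uint32_t rebm_set_fdoff(uint32_t rebm, uint32_t fd_offset)
{
	return (rebm & ~EXT_BUF_MARG_MASK) |
	       ((fd_offset << EXT_BUF_MARG_SHIFT) & EXT_BUF_MARG_MASK);
}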
diff --git a/drivers/bus/dpaa/base/fman/netcfg_layer.c b/drivers/bus/dpaa/base/fman/netcfg_layer.c
index 3e956ce1..031c6f1a 100644
--- a/drivers/bus/dpaa/base/fman/netcfg_layer.c
+++ b/drivers/bus/dpaa/base/fman/netcfg_layer.c
@@ -18,11 +18,6 @@
#include <rte_dpaa_logs.h>
#include <netcfg.h>
-/* Structure contains information about all the interfaces given by user
- * on command line.
- */
-struct netcfg_interface *netcfg_interface;
-
/* This data structure contains all configuration information
 * related to usage of DPA devices.
*/
diff --git a/drivers/bus/dpaa/base/fman/of.c b/drivers/bus/dpaa/base/fman/of.c
index 1b2dbe26..a7f3174e 100644
--- a/drivers/bus/dpaa/base/fman/of.c
+++ b/drivers/bus/dpaa/base/fman/of.c
@@ -182,6 +182,11 @@ linear_dir(struct dt_dir *d)
DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s",
d->node.node.full_name);
d->lphandle = f;
+ } else if (!strcmp(f->node.node.name, "phandle")) {
+ if (d->lphandle)
+ DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s",
+ d->node.node.full_name);
+ d->lphandle = f;
} else if (!strcmp(f->node.node.name, "#address-cells")) {
if (d->a_cells)
DPAA_BUS_LOG(DEBUG, "Duplicate a_cells in %s",
@@ -541,3 +546,42 @@ of_device_is_compatible(const struct device_node *dev_node,
return true;
return false;
}
+
+static const void *of_get_mac_addr(const struct device_node *np,
+ const char *name)
+{
+ return of_get_property(np, name, NULL);
+}
+
+/**
+ * Search the device tree for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain to "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the device tree, but were not set by U-Boot. For example, the
+ * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
+ * addresses. Some older U-Boots only initialized 'local-mac-address'. In
+ * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
+ * but is all zeros.
+ */
+const void *of_get_mac_address(const struct device_node *np)
+{
+ const void *addr;
+
+ addr = of_get_mac_addr(np, "mac-address");
+ if (addr)
+ return addr;
+
+ addr = of_get_mac_addr(np, "local-mac-address");
+ if (addr)
+ return addr;
+
+ return of_get_mac_addr(np, "address");
+}
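The new of_get_mac_address() walks the three candidate properties in order of preference and returns the first hit, leaving any all-zero check to callers. A hypothetical caller-side sketch (fetch_port_mac and its error convention are illustrative, not part of the patch; struct device_node comes from of.h):

#include <stdint.h>
#include <string.h>

static int fetch_port_mac(const struct device_node *np, uint8_t mac[6])
{
	const void *addr = of_get_mac_address(np);

	if (addr == NULL)
		return -1; /* no usable MAC property in the device tree */
	memcpy(mac, addr, 6);
	return 0;
}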
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c
index 1381da36..b14b5905 100644
--- a/drivers/bus/dpaa/base/qbman/bman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -15,9 +15,9 @@
/*
* Global variables of the max portal/pool number this bman version supported
*/
-u16 bman_ip_rev;
+static u16 bman_ip_rev;
u16 bman_pool_max;
-void *bman_ccsr_map;
+static void *bman_ccsr_map;
/*****************/
/* Portal driver */
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index 27d98cc1..7c17027f 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -625,7 +625,7 @@ fail_eqcr:
#define MAX_GLOBAL_PORTALS 8
static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
-rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
+static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
static struct qman_portal *
qman_alloc_global_portal(void)
@@ -1058,7 +1058,7 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
struct qm_portal *portal = &p->p;
register struct qm_dqrr *dqrr = &portal->dqrr;
struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
- struct qman_fq *fq[QM_DQRR_SIZE];
+ struct qman_fq *fq;
unsigned int limit = 0, rx_number = 0;
uint32_t consume = 0;
@@ -1092,14 +1092,13 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
/* SDQCR: context_b points to the FQ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- fq[rx_number] = qman_fq_lookup_table[be32_to_cpu(
- dq[rx_number]->contextB)];
+ fq = qman_fq_lookup_table[be32_to_cpu(dq[rx_number]->contextB)];
#else
- fq[rx_number] = (void *)be32_to_cpu(
- dq[rx_number]->contextB);
+ fq = (void *)be32_to_cpu(dq[rx_number]->contextB);
#endif
- fq[rx_number]->cb.dqrr_prepare(shadow[rx_number],
- &bufs[rx_number]);
+ if (fq->cb.dqrr_prepare)
+ fq->cb.dqrr_prepare(shadow[rx_number],
+ &bufs[rx_number]);
consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
rx_number++;
@@ -1107,7 +1106,7 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
} while (++limit < poll_limit);
if (rx_number)
- fq[0]->cb.dqrr_dpdk_pull_cb(fq, shadow, bufs, rx_number);
+ fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
/* Consume all the DQRR entries together */
qm_out(DQRR_DCAP, (1 << 8) | consume);
@@ -2003,13 +2002,13 @@ int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
return 0;
}
-int qman_set_vdq(struct qman_fq *fq, u16 num)
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
{
struct qman_portal *p = get_affine_portal();
uint32_t vdqcr;
int ret = -EBUSY;
- vdqcr = QM_VDQCR_EXACT;
+ vdqcr = vdqcr_flags;
vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
if ((fq->state != qman_fq_state_parked) &&
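The qman_set_vdq() signature change moves the QM_VDQCR_EXACT decision out to the caller: previously every volatile dequeue was forced to be exact, now the flag is passed in. A usage sketch under that reading (drain_exact is an illustrative helper):

static int drain_exact(struct qman_fq *fq)
{
	/* request 16 frames with the pre-change exact semantics */
	return qman_set_vdq(fq, 16, QM_VDQCR_EXACT);
}

Callers that can tolerate a shorter burst may now pass 0 for the flags instead.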
diff --git a/drivers/bus/dpaa/base/qbman/qman_driver.c b/drivers/bus/dpaa/base/qbman/qman_driver.c
index 07b29d55..f6ecd6b2 100644
--- a/drivers/bus/dpaa/base/qbman/qman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/qman_driver.c
@@ -20,9 +20,9 @@ u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
u16 qm_channel_pme = QMAN_CHANNEL_PME;
/* Ccsr map address to access ccsrbased register */
-void *qman_ccsr_map;
+static void *qman_ccsr_map;
/* The qman clock frequency */
-u32 qman_clk;
+static u32 qman_clk;
static __thread int qmfd = -1;
static __thread struct qm_portal_config qpcfg;
diff --git a/drivers/bus/dpaa/base/qbman/qman_priv.h b/drivers/bus/dpaa/base/qbman/qman_priv.h
index 9e4471e6..02f6301f 100644
--- a/drivers/bus/dpaa/base/qbman/qman_priv.h
+++ b/drivers/bus/dpaa/base/qbman/qman_priv.h
@@ -139,7 +139,6 @@ struct qm_portal_config {
#define QMAN_REV31 0x0301
#define QMAN_REV32 0x0302
extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
-extern u32 qman_clk;
int qm_set_wpm(int wpm);
int qm_get_wpm(int *wpm);
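The bman and qman hunks above share one cleanup theme: a symbol consumed by a single translation unit gets internal linkage, and its stale extern declaration is dropped from the private header, so any out-of-file access now fails at compile time instead of silently sharing state. In miniature (u32 is the driver's kernel-style typedef from compat.h):

/* qman_driver.c: was a global definition "u32 qman_clk;" */
static u32 qman_clk;

/* qman_priv.h: the matching "extern u32 qman_clk;" is removed */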
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 20462065..16fabd1b 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -50,7 +50,7 @@ struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;
/* define a variable to hold the portal_key, once created.*/
-pthread_key_t dpaa_portal_key;
+static pthread_key_t dpaa_portal_key;
unsigned int dpaa_svr_family;
@@ -539,6 +539,13 @@ rte_dpaa_bus_probe(void)
unsigned int svr_ver;
int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
+ svr_file = fopen(DPAA_SOC_ID_FILE, "r");
+ if (svr_file) {
+ if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
+ dpaa_svr_family = svr_ver & SVR_MASK;
+ fclose(svr_file);
+ }
+
/* For each registered driver, and device, call the driver->probe */
TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
@@ -569,13 +576,6 @@ rte_dpaa_bus_probe(void)
if (!TAILQ_EMPTY(&rte_dpaa_bus.device_list))
rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);
- svr_file = fopen(DPAA_SOC_ID_FILE, "r");
- if (svr_file) {
- if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
- dpaa_svr_family = svr_ver & SVR_MASK;
- fclose(svr_file);
- }
-
return 0;
}
@@ -626,9 +626,7 @@ struct rte_dpaa_bus rte_dpaa_bus = {
RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
-RTE_INIT(dpaa_init_log);
-static void
-dpaa_init_log(void)
+RTE_INIT(dpaa_init_log)
{
dpaa_logtype_bus = rte_log_register("bus.dpaa");
if (dpaa_logtype_bus >= 0)
diff --git a/drivers/bus/dpaa/include/compat.h b/drivers/bus/dpaa/include/compat.h
index e4b57021..92241d23 100644
--- a/drivers/bus/dpaa/include/compat.h
+++ b/drivers/bus/dpaa/include/compat.h
@@ -48,9 +48,15 @@
*/
/* Required compiler attributes */
+#ifndef __maybe_unused
#define __maybe_unused __rte_unused
+#endif
+#ifndef __always_unused
#define __always_unused __rte_unused
+#endif
+#ifndef __packed
#define __packed __rte_packed
+#endif
#define noinline __attribute__((noinline))
#define L1_CACHE_BYTES 64
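Wrapping the compat aliases in #ifndef guards lets this header coexist with other SDK headers that define the same kernel-style names; without the guard, an earlier definition of __packed would trigger a macro-redefinition error. The pattern, isolated:

/* define the alias only if nothing defined the name earlier */
#ifndef __packed
#define __packed __rte_packed
#endif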
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index c0ef1bff..1d1ce867 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -108,6 +108,12 @@ int fman_if_get_fdoff(struct fman_if *fm_if);
/* Set interface fd->offset value */
void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);
+/* Get interface SG enable status value */
+int fman_if_get_sg_enable(struct fman_if *fm_if);
+
+/* Set interface SG support mode */
+void fman_if_set_sg(struct fman_if *fm_if, int enable);
+
/* Get interface Max Frame length (MTU) */
uint16_t fman_if_get_maxfrm(struct fman_if *fm_if);
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index e4ad7ae4..b18cf037 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1332,10 +1332,11 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
* qman_set_vdq - Issue a volatile dequeue command
* @fq: Frame Queue on which the volatile dequeue command is issued
* @num: Number of Frames requested for volatile dequeue
+ * @vdqcr_flags: QM_VDQCR_EXACT flag to for VDQCR command
*
* This function will issue a volatile dequeue command to the QMAN.
*/
-int qman_set_vdq(struct qman_fq *fq, u16 num);
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);
/**
* qman_dequeue - Get the DQRR entry after volatile dequeue command
diff --git a/drivers/bus/dpaa/include/of.h b/drivers/bus/dpaa/include/of.h
index 151be5a3..7ea7608f 100644
--- a/drivers/bus/dpaa/include/of.h
+++ b/drivers/bus/dpaa/include/of.h
@@ -109,6 +109,8 @@ const struct device_node *of_get_parent(const struct device_node *dev_node);
const struct device_node *of_get_next_child(const struct device_node *dev_node,
const struct device_node *prev);
+const void *of_get_mac_address(const struct device_node *np);
+
#define for_each_child_node(parent, child) \
for (child = of_get_next_child(parent, NULL); child != NULL; \
child = of_get_next_child(parent, child))
diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map
index 8d902854..7d6d6243 100644
--- a/drivers/bus/dpaa/rte_bus_dpaa_version.map
+++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -92,3 +92,13 @@ DPDK_18.02 {
local: *;
} DPDK_17.11;
+
+DPDK_18.08 {
+ global:
+
+ fman_if_get_sg_enable;
+ fman_if_set_sg;
+ of_get_mac_address;
+
+ local: *;
+} DPDK_18.02;
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
index 8573bd6e..15dc6a4a 100644
--- a/drivers/bus/dpaa/rte_dpaa_bus.h
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -164,8 +164,7 @@ void dpaa_portal_finish(void *arg);
/** Helper for DPAA device registration from driver (eth, crypto) instance */
#define RTE_PMD_REGISTER_DPAA(nm, dpaa_drv) \
-RTE_INIT(dpaainitfn_ ##nm); \
-static void dpaainitfn_ ##nm(void) \
+RTE_INIT(dpaainitfn_ ##nm) \
{\
(dpaa_drv).driver.name = RTE_STR(nm);\
rte_dpaa_driver_register(&dpaa_drv); \
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index c6301b20..d2900edc 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -519,9 +519,7 @@ struct rte_fslmc_bus rte_fslmc_bus = {
RTE_REGISTER_BUS(FSLMC_BUS_NAME, rte_fslmc_bus.bus);
-RTE_INIT(fslmc_init_log);
-static void
-fslmc_init_log(void)
+RTE_INIT(fslmc_init_log)
{
/* Bus level logs */
dpaa2_logtype_bus = rte_log_register("bus.fslmc");
diff --git a/drivers/bus/fslmc/fslmc_logs.h b/drivers/bus/fslmc/fslmc_logs.h
index 9750b8c8..dd74cb7d 100644
--- a/drivers/bus/fslmc/fslmc_logs.h
+++ b/drivers/bus/fslmc/fslmc_logs.h
@@ -18,7 +18,7 @@ extern int dpaa2_logtype_bus;
rte_log(RTE_LOG_DEBUG, dpaa2_logtype_bus, "fslmc: %s(): " fmt "\n", \
__func__, ##args)
-#define BUS_INIT_FUNC_TRACE() DPAA2_BUS_LOG(DEBUG, " >>")
+#define BUS_INIT_FUNC_TRACE() DPAA2_BUS_DEBUG(" >>")
#define DPAA2_BUS_INFO(fmt, args...) \
DPAA2_BUS_LOG(INFO, fmt, ## args)
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index 713ec965..07145005 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -122,8 +122,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
p->vdq.valid_bit = QB_VALID_BIT;
p->dqrr.next_idx = 0;
p->dqrr.valid_bit = QB_VALID_BIT;
- qman_version = p->desc.qman_version;
- if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
+ if ((p->desc.qman_version & 0xFFFF0000) < QMAN_REV_4100) {
p->dqrr.dqrr_size = 4;
p->dqrr.reset_bug = 1;
} else {
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.h b/drivers/bus/fslmc/qbman/qbman_portal.h
index 8bff0b4f..dbea22a1 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/qbman_portal.h
@@ -7,7 +7,6 @@
#include "qbman_sys.h"
#include <fsl_qbman_portal.h>
-uint32_t qman_version;
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
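The qbman hunks fix a classic header bug: qbman_portal.h defined (not merely declared) qman_version, handing a definition to every includer, and the portal code cached the value through it for no benefit. The patch deletes the global and reads p->desc.qman_version directly. Had a shared global genuinely been needed, the correct split would be:

/* qbman_portal.h: declaration only */
extern uint32_t qman_version;

/* exactly one .c file: the single definition */
uint32_t qman_version;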
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 33552b48..cea5b78f 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -173,8 +173,7 @@ void rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver);
/** Helper for DPAA2 device registration from driver (eth, crypto) instance */
#define RTE_PMD_REGISTER_DPAA2(nm, dpaa2_drv) \
-RTE_INIT(dpaa2initfn_ ##nm); \
-static void dpaa2initfn_ ##nm(void) \
+RTE_INIT(dpaa2initfn_ ##nm) \
{\
(dpaa2_drv).driver.name = RTE_STR(nm);\
rte_fslmc_driver_register(&dpaa2_drv); \
@@ -203,8 +202,7 @@ uint32_t rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type);
/** Helper for DPAA2 object registration */
#define RTE_PMD_REGISTER_DPAA2_OBJECT(nm, dpaa2_obj) \
-RTE_INIT(dpaa2objinitfn_ ##nm); \
-static void dpaa2objinitfn_ ##nm(void) \
+RTE_INIT(dpaa2objinitfn_ ##nm) \
{\
(dpaa2_obj).name = RTE_STR(nm);\
rte_fslmc_object_register(&dpaa2_obj); \
diff --git a/drivers/bus/ifpga/rte_bus_ifpga.h b/drivers/bus/ifpga/rte_bus_ifpga.h
index 981bc352..51d5ae0d 100644
--- a/drivers/bus/ifpga/rte_bus_ifpga.h
+++ b/drivers/bus/ifpga/rte_bus_ifpga.h
@@ -134,9 +134,8 @@ void rte_ifpga_driver_register(struct rte_afu_driver *driver);
void rte_ifpga_driver_unregister(struct rte_afu_driver *driver);
#define RTE_PMD_REGISTER_AFU(nm, afudrv)\
-RTE_INIT(afudrvinitfn_ ##afudrv);\
static const char *afudrvinit_ ## nm ## _alias;\
-static void afudrvinitfn_ ##afudrv(void)\
+RTE_INIT(afudrvinitfn_ ##afudrv)\
{\
(afudrv).driver.name = RTE_STR(nm);\
(afudrv).driver.alias = afudrvinit_ ## nm ## _alias;\
diff --git a/drivers/bus/meson.build b/drivers/bus/meson.build
index 52c755dc..80de2d91 100644
--- a/drivers/bus/meson.build
+++ b/drivers/bus/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['dpaa', 'fslmc', 'ifpga', 'pci', 'vdev']
+drivers = ['dpaa', 'fslmc', 'ifpga', 'pci', 'vdev', 'vmbus']
std_deps = ['eal']
config_flag_fmt = 'RTE_LIBRTE_@0@_BUS'
driver_name_fmt = 'rte_bus_@0@'
diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
index 004600f1..04648ac9 100644
--- a/drivers/bus/pci/linux/pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -15,7 +15,6 @@
#include <rte_memcpy.h>
#include <rte_vfio.h>
-#include "eal_private.h"
#include "eal_filesystem.h"
#include "private.h"
diff --git a/drivers/bus/pci/linux/pci_uio.c b/drivers/bus/pci/linux/pci_uio.c
index d423e4bb..a7c14421 100644
--- a/drivers/bus/pci/linux/pci_uio.c
+++ b/drivers/bus/pci/linux/pci_uio.c
@@ -282,22 +282,19 @@ int
pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
struct mapped_pci_resource *uio_res, int map_idx)
{
- int fd;
+ int fd = -1;
char devname[PATH_MAX];
void *mapaddr;
struct rte_pci_addr *loc;
struct pci_map *maps;
+ int wc_activate = 0;
+
+ if (dev->driver != NULL)
+ wc_activate = dev->driver->drv_flags & RTE_PCI_DRV_WC_ACTIVATE;
loc = &dev->addr;
maps = uio_res->maps;
- /* update devname for mmap */
- snprintf(devname, sizeof(devname),
- "%s/" PCI_PRI_FMT "/resource%d",
- rte_pci_get_sysfs_path(),
- loc->domain, loc->bus, loc->devid,
- loc->function, res_idx);
-
/* allocate memory to keep path */
maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
if (maps[map_idx].path == NULL) {
@@ -309,11 +306,37 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
/*
* open resource file, to mmap it
*/
- fd = open(devname, O_RDWR);
- if (fd < 0) {
- RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ if (wc_activate) {
+ /* update devname for mmap */
+ snprintf(devname, sizeof(devname),
+ "%s/" PCI_PRI_FMT "/resource%d_wc",
+ rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid,
+ loc->function, res_idx);
+
+ if (access(devname, R_OK|W_OK) != -1) {
+ fd = open(devname, O_RDWR);
+ if (fd < 0)
+ RTE_LOG(INFO, EAL, "%s cannot be mapped. "
+ "Fall-back to non prefetchable mode.\n",
+ devname);
+ }
+ }
+
+ if (!wc_activate || fd < 0) {
+ snprintf(devname, sizeof(devname),
+ "%s/" PCI_PRI_FMT "/resource%d",
+ rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid,
+ loc->function, res_idx);
+
+ /* then try to map resource file */
+ fd = open(devname, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
devname, strerror(errno));
- goto error;
+ goto error;
+ }
}
/* try mapping somewhere close to the end of hugepages */
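The rework above tries the write-combining sysfs node resourceN_wc first, but only when the driver opted in via RTE_PCI_DRV_WC_ACTIVATE, and falls back to the plain resourceN mapping when the node is absent or cannot be opened. Condensed into a runnable sketch (open_bar and the fixed-size path buffer are illustrative, not EAL code):

#include <fcntl.h>
#include <stdio.h>

static int open_bar(const char *sysfs_dir, int idx, int wc_activate)
{
	char path[256];
	int fd = -1;

	if (wc_activate) {
		snprintf(path, sizeof(path), "%s/resource%d_wc",
			 sysfs_dir, idx);
		fd = open(path, O_RDWR);
	}
	if (fd < 0) { /* WC not requested, or the _wc node unusable */
		snprintf(path, sizeof(path), "%s/resource%d",
			 sysfs_dir, idx);
		fd = open(path, O_RDWR);
	}
	return fd;
}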
diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c
index aeeaa9ed..686386d6 100644
--- a/drivers/bus/pci/linux/pci_vfio.c
+++ b/drivers/bus/pci/linux/pci_vfio.c
@@ -584,6 +584,9 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
dev->mem_resource[i].addr = maps[i].addr;
}
+ /* we need save vfio_dev_fd, so it can be used during release */
+ dev->intr_handle.vfio_dev_fd = vfio_dev_fd;
+
return 0;
err_vfio_dev_fd:
close(vfio_dev_fd);
@@ -603,22 +606,58 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
return pci_vfio_map_resource_secondary(dev);
}
-int
-pci_vfio_unmap_resource(struct rte_pci_device *dev)
+static struct mapped_pci_resource *
+find_and_unmap_vfio_resource(struct mapped_pci_res_list *vfio_res_list,
+ struct rte_pci_device *dev,
+ const char *pci_addr)
+{
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct pci_map *maps;
+ int i;
+
+ /* Get vfio_res */
+ TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
+ if (rte_pci_addr_cmp(&vfio_res->pci_addr, &dev->addr))
+ continue;
+ break;
+ }
+
+ if (vfio_res == NULL)
+ return vfio_res;
+
+ RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n",
+ pci_addr);
+
+ maps = vfio_res->maps;
+ for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+
+ /*
+ * We do not need to be aware of MSI-X table BAR mappings as
+ * when mapping. Just using current maps array is enough
+ */
+ if (maps[i].addr) {
+ RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
+ pci_addr, maps[i].addr);
+ pci_unmap_resource(maps[i].addr, maps[i].size);
+ }
+ }
+
+ return vfio_res;
+}
+
+static int
+pci_vfio_unmap_resource_primary(struct rte_pci_device *dev)
{
char pci_addr[PATH_MAX] = {0};
struct rte_pci_addr *loc = &dev->addr;
- int i, ret;
struct mapped_pci_resource *vfio_res = NULL;
struct mapped_pci_res_list *vfio_res_list;
-
- struct pci_map *maps;
+ int ret;
/* store PCI address string */
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
loc->domain, loc->bus, loc->devid, loc->function);
-
if (close(dev->intr_handle.fd) < 0) {
RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n",
pci_addr);
@@ -639,13 +678,10 @@ pci_vfio_unmap_resource(struct rte_pci_device *dev)
return ret;
}
- vfio_res_list = RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
- /* Get vfio_res */
- TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
- if (memcmp(&vfio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
- continue;
- break;
- }
+ vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+ vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);
+
/* if we haven't found our tailq entry, something's wrong */
if (vfio_res == NULL) {
RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
@@ -653,30 +689,56 @@ pci_vfio_unmap_resource(struct rte_pci_device *dev)
return -1;
}
- /* unmap BARs */
- maps = vfio_res->maps;
+ TAILQ_REMOVE(vfio_res_list, vfio_res, next);
- RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n",
- pci_addr);
- for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+ return 0;
+}
- /*
- * We do not need to be aware of MSI-X table BAR mappings as
- * when mapping. Just using current maps array is enough
- */
- if (maps[i].addr) {
- RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
- pci_addr, maps[i].addr);
- pci_unmap_resource(maps[i].addr, maps[i].size);
- }
+static int
+pci_vfio_unmap_resource_secondary(struct rte_pci_device *dev)
+{
+ char pci_addr[PATH_MAX] = {0};
+ struct rte_pci_addr *loc = &dev->addr;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list;
+ int ret;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr,
+ dev->intr_handle.vfio_dev_fd);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot release device\n", __func__);
+ return ret;
}
- TAILQ_REMOVE(vfio_res_list, vfio_res, next);
+ vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+ vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);
+
+ /* if we haven't found our tailq entry, something's wrong */
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
+ pci_addr);
+ return -1;
+ }
return 0;
}
int
+pci_vfio_unmap_resource(struct rte_pci_device *dev)
+{
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return pci_vfio_unmap_resource_primary(dev);
+ else
+ return pci_vfio_unmap_resource_secondary(dev);
+}
+
+int
pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
struct rte_pci_ioport *p)
{
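Beyond splitting unmap into primary/secondary paths, the patch replaces the memcmp()-based tailq lookup with rte_pci_addr_cmp(). Byte-wise comparison of structs also compares padding bytes, which are not guaranteed to be initialized; a field-wise compare avoids that. Roughly (pci_addr_equal is a sketch, not the EAL helper, and inverts rte_pci_addr_cmp()'s 0-on-equal convention):

static int pci_addr_equal(const struct rte_pci_addr *a,
			  const struct rte_pci_addr *b)
{
	return a->domain == b->domain && a->bus == b->bus &&
	       a->devid == b->devid && a->function == b->function;
}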
diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c
index 7215aaec..7736b3f9 100644
--- a/drivers/bus/pci/pci_common.c
+++ b/drivers/bus/pci/pci_common.c
@@ -26,6 +26,7 @@
#include "private.h"
+
extern struct rte_pci_bus rte_pci_bus;
#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
@@ -69,7 +70,7 @@ pci_name_set(struct rte_pci_device *dev)
*/
if (devargs != NULL)
/* If an rte_devargs exists, the generic rte_device uses the
- * given name as its namea
+ * given name as its name.
*/
dev->device.name = dev->device.devargs->name;
else
@@ -155,17 +156,24 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
RTE_LOG(INFO, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
dev->id.device_id, dr->driver.name);
+ /*
+ * reference driver structure
+ * This needs to be before rte_pci_map_device(), as it enables to use
+ * driver flags for adjusting configuration.
+ */
+ dev->driver = dr;
+ dev->device.driver = &dr->driver;
+
if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
/* map resources for devices that use igb_uio */
ret = rte_pci_map_device(dev);
- if (ret != 0)
+ if (ret != 0) {
+ dev->driver = NULL;
+ dev->device.driver = NULL;
return ret;
+ }
}
- /* reference driver structure */
- dev->driver = dr;
- dev->device.driver = &dr->driver;
-
/* call the driver probe() function */
ret = dr->probe(dr, dev);
if (ret) {
@@ -255,81 +263,6 @@ pci_probe_all_drivers(struct rte_pci_device *dev)
}
/*
- * Find the pci device specified by pci address, then invoke probe function of
- * the driver of the device.
- */
-int
-rte_pci_probe_one(const struct rte_pci_addr *addr)
-{
- struct rte_pci_device *dev = NULL;
-
- int ret = 0;
-
- if (addr == NULL)
- return -1;
-
- /* update current pci device in global list, kernel bindings might have
- * changed since last time we looked at it.
- */
- if (pci_update_device(addr) < 0)
- goto err_return;
-
- FOREACH_DEVICE_ON_PCIBUS(dev) {
- if (rte_pci_addr_cmp(&dev->addr, addr))
- continue;
-
- ret = pci_probe_all_drivers(dev);
- if (ret)
- goto err_return;
- return 0;
- }
- return -1;
-
-err_return:
- RTE_LOG(WARNING, EAL,
- "Requested device " PCI_PRI_FMT " cannot be used\n",
- addr->domain, addr->bus, addr->devid, addr->function);
- return -1;
-}
-
-/*
- * Detach device specified by its pci address.
- */
-int
-rte_pci_detach(const struct rte_pci_addr *addr)
-{
- struct rte_pci_device *dev = NULL;
- int ret = 0;
-
- if (addr == NULL)
- return -1;
-
- FOREACH_DEVICE_ON_PCIBUS(dev) {
- if (rte_pci_addr_cmp(&dev->addr, addr))
- continue;
-
- ret = rte_pci_detach_dev(dev);
- if (ret < 0)
- /* negative value is an error */
- goto err_return;
- if (ret > 0)
- /* positive value means driver doesn't support it */
- continue;
-
- rte_pci_remove_device(dev);
- free(dev);
- return 0;
- }
- return -1;
-
-err_return:
- RTE_LOG(WARNING, EAL, "Requested device " PCI_PRI_FMT
- " cannot be used\n", dev->addr.domain, dev->addr.bus,
- dev->addr.devid, dev->addr.function);
- return -1;
-}
-
-/*
* Scan the content of the PCI bus, and call the probe() function for
* all registered drivers that have a matching entry in its id_table
* for discovered devices.
@@ -445,7 +378,7 @@ rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
}
/* Remove a device from PCI bus */
-void
+static void
rte_pci_remove_device(struct rte_pci_device *pci_dev)
{
TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
diff --git a/drivers/bus/pci/private.h b/drivers/bus/pci/private.h
index 88fa587e..8ddd03e1 100644
--- a/drivers/bus/pci/private.h
+++ b/drivers/bus/pci/private.h
@@ -33,36 +33,6 @@ rte_pci_probe(void);
int rte_pci_scan(void);
/**
- * Probe the single PCI device.
- *
- * Scan the content of the PCI bus, and find the pci device specified by pci
- * address, then call the probe() function for registered driver that has a
- * matching entry in its id_table for discovered device.
- *
- * @param addr
- * The PCI Bus-Device-Function address to probe.
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_pci_probe_one(const struct rte_pci_addr *addr);
-
-/**
- * Close the single PCI device.
- *
- * Scan the content of the PCI bus, and find the pci device specified by pci
- * address, then call the remove() function for registered driver that has a
- * matching entry in its id_table for discovered device.
- *
- * @param addr
- * The PCI Bus-Device-Function address to close.
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_pci_detach(const struct rte_pci_addr *addr);
-
-/**
* Find the name of a PCI device.
*/
void
@@ -94,16 +64,6 @@ void rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
struct rte_pci_device *new_pci_dev);
/**
- * Remove a PCI device from the PCI Bus. This sets to NULL the bus references
- * in the PCI device object as well as the generic device object.
- *
- * @param pci_device
- * PCI device to be removed from PCI Bus
- * @return void
- */
-void rte_pci_remove_device(struct rte_pci_device *pci_device);
-
-/**
* Update a pci device object by asking the kernel for the latest information.
*
* This function is private to EAL.
@@ -117,16 +77,6 @@ void rte_pci_remove_device(struct rte_pci_device *pci_device);
int pci_update_device(const struct rte_pci_addr *addr);
/**
- * Unbind kernel driver for this device
- *
- * This function is private to EAL.
- *
- * @return
- * 0 on success, negative on error
- */
-int pci_unbind_kernel_driver(struct rte_pci_device *dev);
-
-/**
* Map the PCI resource of a PCI device in virtual memory
*
* This function is private to EAL.
diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h
index 458e6d07..0d1955ff 100644
--- a/drivers/bus/pci/rte_bus_pci.h
+++ b/drivers/bus/pci/rte_bus_pci.h
@@ -135,6 +135,8 @@ struct rte_pci_bus {
/** Device needs PCI BAR mapping (done with either IGB_UIO or VFIO) */
#define RTE_PCI_DRV_NEED_MAPPING 0x0001
+/** Device needs PCI BAR mapping with enabled write combining (wc) */
+#define RTE_PCI_DRV_WC_ACTIVATE 0x0002
/** Device driver supports link state interrupt */
#define RTE_PCI_DRV_INTR_LSC 0x0008
/** Device driver supports device removal interrupt */
@@ -189,8 +191,7 @@ void rte_pci_register(struct rte_pci_driver *driver);
/** Helper for PCI device registration from driver (eth, crypto) instance */
#define RTE_PMD_REGISTER_PCI(nm, pci_drv) \
-RTE_INIT(pciinitfn_ ##nm); \
-static void pciinitfn_ ##nm(void) \
+RTE_INIT(pciinitfn_ ##nm) \
{\
(pci_drv).driver.name = RTE_STR(nm);\
rte_pci_register(&pci_drv); \
diff --git a/drivers/bus/vdev/rte_bus_vdev.h b/drivers/bus/vdev/rte_bus_vdev.h
index f9b5eb59..9ae3eaae 100644
--- a/drivers/bus/vdev/rte_bus_vdev.h
+++ b/drivers/bus/vdev/rte_bus_vdev.h
@@ -86,9 +86,8 @@ void rte_vdev_register(struct rte_vdev_driver *driver);
void rte_vdev_unregister(struct rte_vdev_driver *driver);
#define RTE_PMD_REGISTER_VDEV(nm, vdrv)\
-RTE_INIT(vdrvinitfn_ ##vdrv);\
static const char *vdrvinit_ ## nm ## _alias;\
-static void vdrvinitfn_ ##vdrv(void)\
+RTE_INIT(vdrvinitfn_ ##vdrv)\
{\
(vdrv).driver.name = RTE_STR(nm);\
(vdrv).driver.alias = vdrvinit_ ## nm ## _alias;\
diff --git a/drivers/bus/vmbus/Makefile b/drivers/bus/vmbus/Makefile
new file mode 100644
index 00000000..deee9dd1
--- /dev/null
+++ b/drivers/bus/vmbus/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_bus_vmbus.a
+LIBABIVER := 1
+EXPORT_MAP := rte_bus_vmbus_version.map
+
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),)
+SYSTEM := linux
+endif
+ifneq ($(CONFIG_RTE_EXEC_ENV_BSDAPP),)
+$(error "VMBUS not implemented for BSD yet")
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/vmbus/$(SYSTEM)
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev
+
+include $(RTE_SDK)/drivers/bus/vmbus/$(SYSTEM)/Makefile
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) := $(addprefix $(SYSTEM)/,$(SRCS))
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_channel.c vmbus_bufring.c
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_common_uio.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_VMBUS)-include += rte_bus_vmbus.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_VMBUS)-include += rte_vmbus_reg.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/vmbus/linux/Makefile b/drivers/bus/vmbus/linux/Makefile
new file mode 100644
index 00000000..ef0d30b2
--- /dev/null
+++ b/drivers/bus/vmbus/linux/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+SRCS += vmbus_bus.c vmbus_uio.c
diff --git a/drivers/bus/vmbus/linux/vmbus_bus.c b/drivers/bus/vmbus/linux/vmbus_bus.c
new file mode 100644
index 00000000..52d6a3c0
--- /dev/null
+++ b/drivers/bus/vmbus/linux/vmbus_bus.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#include <rte_eal.h>
+#include <rte_uuid.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_devargs.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_bus_vmbus.h>
+
+#include "eal_filesystem.h"
+#include "private.h"
+
+/** Pathname of VMBUS devices directory. */
+#define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices"
+
+extern struct rte_vmbus_bus rte_vmbus_bus;
+
+/* Read sysfs file to get UUID */
+static int
+parse_sysfs_uuid(const char *filename, rte_uuid_t uu)
+{
+ char buf[BUFSIZ];
+ char *cp, *in = buf;
+ FILE *f;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ VMBUS_LOG(ERR, "cannot open sysfs value %s: %s",
+ filename, strerror(errno));
+ return -1;
+ }
+
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ VMBUS_LOG(ERR, "cannot read sysfs value %s",
+ filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+
+ cp = strchr(buf, '\n');
+ if (cp)
+ *cp = '\0';
+
+ /* strip { } notation */
+ if (buf[0] == '{') {
+ in = buf + 1;
+ cp = strchr(in, '}');
+ if (cp)
+ *cp = '\0';
+ }
+
+ if (rte_uuid_parse(in, uu) < 0) {
+ VMBUS_LOG(ERR, "%s %s not a valid UUID",
+ filename, buf);
+ return -1;
+ }
+
+ return 0;
+}
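+/*
+ * Illustrative inputs this parser accepts (the UUID value is an
+ * example only): a bare UUID such as
+ *	7a08391f-f5a0-4ac0-9802-d13fd964f8df
+ * or the same value in brace notation,
+ *	{7a08391f-f5a0-4ac0-9802-d13fd964f8df}
+ * in both cases with or without a trailing newline.
+ */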
+
+static int
+get_sysfs_string(const char *filename, char *buf, size_t buflen)
+{
+ char *cp;
+ FILE *f;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+		VMBUS_LOG(ERR, "cannot open sysfs value %s: %s",
+ filename, strerror(errno));
+ return -1;
+ }
+
+ if (fgets(buf, buflen, f) == NULL) {
+ VMBUS_LOG(ERR, "cannot read sysfs value %s",
+ filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+
+ /* remove trailing newline */
+ cp = memchr(buf, '\n', buflen);
+ if (cp)
+ *cp = '\0';
+
+ return 0;
+}
+
+static int
+vmbus_get_uio_dev(const struct rte_vmbus_device *dev,
+ char *dstbuf, size_t buflen)
+{
+ char dirname[PATH_MAX];
+ unsigned int uio_num;
+ struct dirent *e;
+ DIR *dir;
+
+ /* Assume recent kernel where uio is in uio/uioX */
+ snprintf(dirname, sizeof(dirname),
+ SYSFS_VMBUS_DEVICES "/%s/uio", dev->device.name);
+
+ dir = opendir(dirname);
+ if (dir == NULL)
+ return -1; /* Not a UIO device */
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ const int prefix_len = 3;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", prefix_len) != 0)
+ continue;
+
+ /* try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + prefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + prefix_len)) {
+ snprintf(dstbuf, buflen, "%s/uio%u", dirname, uio_num);
+ break;
+ }
+ }
+ closedir(dir);
+
+ if (e == NULL)
+ return -1;
+
+ return uio_num;
+}
+
+/* Check map names with kernel names */
+static const char *map_names[VMBUS_MAX_RESOURCE] = {
+ [HV_TXRX_RING_MAP] = "txrx_rings",
+ [HV_INT_PAGE_MAP] = "int_page",
+ [HV_MON_PAGE_MAP] = "monitor_page",
+ [HV_RECV_BUF_MAP] = "recv:",
+ [HV_SEND_BUF_MAP] = "send:",
+};
+
+
+/* map the resources of a vmbus device in virtual memory */
+int
+rte_vmbus_map_device(struct rte_vmbus_device *dev)
+{
+ char uioname[PATH_MAX], filename[PATH_MAX];
+ char dirname[PATH_MAX], mapname[64];
+ int i;
+
+ dev->uio_num = vmbus_get_uio_dev(dev, uioname, sizeof(uioname));
+ if (dev->uio_num < 0) {
+ VMBUS_LOG(DEBUG, "Not managed by UIO driver, skipped");
+ return 1;
+ }
+
+ /* Extract resource value */
+ for (i = 0; i < VMBUS_MAX_RESOURCE; i++) {
+ struct rte_mem_resource *res = &dev->resource[i];
+ unsigned long len, gpad = 0;
+ char *cp;
+
+ snprintf(dirname, sizeof(dirname),
+ "%s/maps/map%d", uioname, i);
+
+ snprintf(filename, sizeof(filename),
+ "%s/name", dirname);
+
+ if (get_sysfs_string(filename, mapname, sizeof(mapname)) < 0) {
+ VMBUS_LOG(ERR, "could not read %s", filename);
+ return -1;
+ }
+
+ if (strncmp(map_names[i], mapname, strlen(map_names[i])) != 0) {
+ VMBUS_LOG(ERR,
+ "unexpected resource %s (expected %s)",
+ mapname, map_names[i]);
+ return -1;
+ }
+
+ snprintf(filename, sizeof(filename),
+ "%s/size", dirname);
+ if (eal_parse_sysfs_value(filename, &len) < 0) {
+ VMBUS_LOG(ERR,
+ "could not read %s", filename);
+ return -1;
+ }
+ res->len = len;
+
+ /* both send and receive buffers have gpad in name */
+ cp = memchr(mapname, ':', sizeof(mapname));
+ if (cp)
+ gpad = strtoul(cp+1, NULL, 0);
+
+ /* put the GPAD value in physical address */
+ res->phys_addr = gpad;
+ }
+
+ return vmbus_uio_map_resource(dev);
+}
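+/*
+ * Sketch of the per-device sysfs layout consumed above (paths and the
+ * GPADL value are illustrative):
+ *
+ *	.../uio/uioX/maps/map0/name	-> "txrx_rings"
+ *	.../uio/uioX/maps/map0/size	-> length in bytes
+ *	.../uio/uioX/maps/map3/name	-> "recv:12345"
+ *
+ * For the send and receive buffers, the GPADL handle after the ':'
+ * is stored in resource[i].phys_addr.
+ */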
+
+void
+rte_vmbus_unmap_device(struct rte_vmbus_device *dev)
+{
+ vmbus_uio_unmap_resource(dev);
+}
+
+/* Scan one vmbus sysfs entry, and fill the devices list from it. */
+static int
+vmbus_scan_one(const char *name)
+{
+ struct rte_vmbus_device *dev, *dev2;
+ char filename[PATH_MAX];
+ char dirname[PATH_MAX];
+ unsigned long tmp;
+
+ dev = calloc(1, sizeof(*dev));
+ if (dev == NULL)
+ return -1;
+
+ dev->device.name = strdup(name);
+ if (!dev->device.name)
+ goto error;
+
+ /* sysfs base directory
+ * /sys/bus/vmbus/devices/7a08391f-f5a0-4ac0-9802-d13fd964f8df
+ * or on older kernel
+ * /sys/bus/vmbus/devices/vmbus_1
+ */
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ SYSFS_VMBUS_DEVICES, name);
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device_id", dirname);
+ if (parse_sysfs_uuid(filename, dev->device_id) < 0)
+ goto error;
+
+ /* get device class */
+ snprintf(filename, sizeof(filename), "%s/class_id", dirname);
+ if (parse_sysfs_uuid(filename, dev->class_id) < 0)
+ goto error;
+
+ /* get relid */
+ snprintf(filename, sizeof(filename), "%s/id", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->relid = tmp;
+
+ /* get monitor id */
+ snprintf(filename, sizeof(filename), "%s/monitor_id", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->monitor_id = tmp;
+
+ /* get numa node (if present) */
+ snprintf(filename, sizeof(filename), "%s/numa_node",
+ dirname);
+
+ if (access(filename, R_OK) == 0) {
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->device.numa_node = tmp;
+ } else {
+		/* if no NUMA support, default to SOCKET_ID_ANY */
+ dev->device.numa_node = SOCKET_ID_ANY;
+ }
+
+ /* device is valid, add in list (sorted) */
+ VMBUS_LOG(DEBUG, "Adding vmbus device %s", name);
+
+ TAILQ_FOREACH(dev2, &rte_vmbus_bus.device_list, next) {
+ int ret;
+
+ ret = rte_uuid_compare(dev->device_id, dev2->device_id);
+ if (ret > 0)
+ continue;
+
+ if (ret < 0) {
+ vmbus_insert_device(dev2, dev);
+ } else { /* already registered */
+ VMBUS_LOG(NOTICE,
+ "%s already registered", name);
+ free(dev);
+ }
+ return 0;
+ }
+
+ vmbus_add_device(dev);
+ return 0;
+error:
+ VMBUS_LOG(DEBUG, "failed");
+
+ free(dev);
+ return -1;
+}
+
+/*
+ * Scan the content of the VMBUS, and add the discovered devices to the
+ * devices list.
+ */
+int
+rte_vmbus_scan(void)
+{
+ struct dirent *e;
+ DIR *dir;
+
+ dir = opendir(SYSFS_VMBUS_DEVICES);
+ if (dir == NULL) {
+ if (errno == ENOENT)
+ return 0;
+
+ VMBUS_LOG(ERR, "opendir %s failed: %s",
+ SYSFS_VMBUS_DEVICES, strerror(errno));
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ if (e->d_name[0] == '.')
+ continue;
+
+ if (vmbus_scan_one(e->d_name) < 0)
+ goto error;
+ }
+ closedir(dir);
+ return 0;
+
+error:
+ closedir(dir);
+ return -1;
+}
+
+void rte_vmbus_irq_mask(struct rte_vmbus_device *device)
+{
+ vmbus_uio_irq_control(device, 1);
+}
+
+void rte_vmbus_irq_unmask(struct rte_vmbus_device *device)
+{
+ vmbus_uio_irq_control(device, 0);
+}
+
+int rte_vmbus_irq_read(struct rte_vmbus_device *device)
+{
+ return vmbus_uio_irq_read(device);
+}
diff --git a/drivers/bus/vmbus/linux/vmbus_uio.c b/drivers/bus/vmbus/linux/vmbus_uio.c
new file mode 100644
index 00000000..856c6d66
--- /dev/null
+++ b/drivers/bus/vmbus/linux/vmbus_uio.c
@@ -0,0 +1,398 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_memory.h>
+#include <rte_eal_memconfig.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_bus_vmbus.h>
+#include <rte_string_fns.h>
+
+#include "private.h"
+
+/** Pathname of VMBUS devices directory. */
+#define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices"
+
+static void *vmbus_map_addr;
+
+/* Control interrupts */
+void vmbus_uio_irq_control(struct rte_vmbus_device *dev, int32_t onoff)
+{
+ if (write(dev->intr_handle.fd, &onoff, sizeof(onoff)) < 0) {
+ VMBUS_LOG(ERR, "cannot write to %d:%s",
+ dev->intr_handle.fd, strerror(errno));
+ }
+}
+
+int vmbus_uio_irq_read(struct rte_vmbus_device *dev)
+{
+ int32_t count;
+ int cc;
+
+ cc = read(dev->intr_handle.fd, &count, sizeof(count));
+ if (cc < (int)sizeof(count)) {
+ if (cc < 0) {
+ VMBUS_LOG(ERR, "IRQ read failed %s",
+ strerror(errno));
+ return -errno;
+ }
+ VMBUS_LOG(ERR, "can't read IRQ count");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+void
+vmbus_uio_free_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource *uio_res)
+{
+ rte_free(uio_res);
+
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+
+ if (dev->intr_handle.fd >= 0) {
+ close(dev->intr_handle.fd);
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ }
+}
+
+int
+vmbus_uio_alloc_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource **uio_res)
+{
+ char devname[PATH_MAX]; /* contains the /dev/uioX */
+
+ /* save fd if in primary process */
+ snprintf(devname, sizeof(devname), "/dev/uio%u", dev->uio_num);
+ dev->intr_handle.fd = open(devname, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ devname, strerror(errno));
+ goto error;
+ }
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO_INTX;
+
+	/* allocate the mapping details for secondary processes */
+ *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
+ if (*uio_res == NULL) {
+ VMBUS_LOG(ERR, "cannot store uio mmap details");
+ goto error;
+ }
+
+ strlcpy((*uio_res)->path, devname, PATH_MAX);
+ rte_uuid_copy((*uio_res)->id, dev->device_id);
+
+ return 0;
+
+error:
+ vmbus_uio_free_resource(dev, *uio_res);
+ return -1;
+}
+
+static int
+find_max_end_va(const struct rte_memseg_list *msl, void *arg)
+{
+ size_t sz = msl->memseg_arr.len * msl->page_sz;
+ void *end_va = RTE_PTR_ADD(msl->base_va, sz);
+ void **max_va = arg;
+
+ if (*max_va < end_va)
+ *max_va = end_va;
+ return 0;
+}
+
+/*
+ * TODO: this should be part of memseg api.
+ * code is duplicated from PCI.
+ */
+static void *
+vmbus_find_max_end_va(void)
+{
+ void *va = NULL;
+
+ rte_memseg_list_walk(find_max_end_va, &va);
+ return va;
+}
+
+int
+vmbus_uio_map_resource_by_index(struct rte_vmbus_device *dev, int idx,
+ struct mapped_vmbus_resource *uio_res,
+ int flags)
+{
+ size_t size = dev->resource[idx].len;
+ struct vmbus_map *maps = uio_res->maps;
+ void *mapaddr;
+ off_t offset;
+ int fd;
+
+ /* devname for mmap */
+ fd = open(uio_res->path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ uio_res->path, strerror(errno));
+ return -1;
+ }
+
+ /* try mapping somewhere close to the end of hugepages */
+ if (vmbus_map_addr == NULL)
+ vmbus_map_addr = vmbus_find_max_end_va();
+
+ /* offset is special in uio it indicates which resource */
+ offset = idx * PAGE_SIZE;
+
+ mapaddr = vmbus_map_resource(vmbus_map_addr, fd, offset, size, flags);
+ close(fd);
+
+ if (mapaddr == MAP_FAILED)
+ return -1;
+
+ dev->resource[idx].addr = mapaddr;
+ vmbus_map_addr = RTE_PTR_ADD(mapaddr, size);
+
+	/* Record result of successful mapping for use by secondary */
+ maps[idx].addr = mapaddr;
+ maps[idx].size = size;
+
+ return 0;
+}
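+/*
+ * Minimal sketch of the UIO mmap convention relied on above: the mmap
+ * offset selects the resource, so mapping resource idx means passing
+ * idx * PAGE_SIZE (the device name here is illustrative):
+ *
+ *	fd = open("/dev/uio0", O_RDWR);
+ *	addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *		    MAP_SHARED, fd, 1 * PAGE_SIZE);	(selects map1)
+ */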
+
+static int vmbus_uio_map_primary(struct vmbus_channel *chan,
+ void **ring_buf, uint32_t *ring_size)
+{
+ struct mapped_vmbus_resource *uio_res;
+
+ uio_res = vmbus_uio_find_resource(chan->device);
+ if (!uio_res) {
+		VMBUS_LOG(ERR, "cannot find resources!");
+ return -ENOMEM;
+ }
+
+ if (uio_res->nb_maps < VMBUS_MAX_RESOURCE) {
+ VMBUS_LOG(ERR, "VMBUS: only %u resources found!",
+ uio_res->nb_maps);
+ return -EINVAL;
+ }
+
+ *ring_size = uio_res->maps[HV_TXRX_RING_MAP].size / 2;
+ *ring_buf = uio_res->maps[HV_TXRX_RING_MAP].addr;
+ return 0;
+}
+
+static int vmbus_uio_map_subchan(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan,
+ void **ring_buf, uint32_t *ring_size)
+{
+ char ring_path[PATH_MAX];
+ size_t file_size;
+ struct stat sb;
+ int fd;
+
+ snprintf(ring_path, sizeof(ring_path),
+ "%s/%s/channels/%u/ring",
+ SYSFS_VMBUS_DEVICES, dev->device.name,
+ chan->relid);
+
+ fd = open(ring_path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ ring_path, strerror(errno));
+ return -errno;
+ }
+
+ if (fstat(fd, &sb) < 0) {
+		VMBUS_LOG(ERR, "Cannot stat %s: %s",
+ ring_path, strerror(errno));
+ close(fd);
+ return -errno;
+ }
+ file_size = sb.st_size;
+
+ if (file_size == 0 || (file_size & (PAGE_SIZE - 1))) {
+ VMBUS_LOG(ERR, "incorrect size %s: %zu",
+ ring_path, file_size);
+
+ close(fd);
+ return -EINVAL;
+ }
+
+ *ring_size = file_size / 2;
+ *ring_buf = vmbus_map_resource(vmbus_map_addr, fd,
+ 0, sb.st_size, 0);
+ close(fd);
+
+	if (*ring_buf == MAP_FAILED)
+		return -EIO;
+
+	vmbus_map_addr = RTE_PTR_ADD(*ring_buf, file_size);
+ return 0;
+}
+
+int vmbus_uio_map_rings(struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *dev = chan->device;
+ uint32_t ring_size;
+ void *ring_buf;
+ int ret;
+
+ /* Primary channel */
+ if (chan->subchannel_id == 0)
+ ret = vmbus_uio_map_primary(chan, &ring_buf, &ring_size);
+ else
+ ret = vmbus_uio_map_subchan(dev, chan, &ring_buf, &ring_size);
+
+ if (ret)
+ return ret;
+
+ vmbus_br_setup(&chan->txbr, ring_buf, ring_size);
+ vmbus_br_setup(&chan->rxbr, (char *)ring_buf + ring_size, ring_size);
+ return 0;
+}
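+/*
+ * Resulting layout of the single ring mapping set up above: it is
+ * split evenly, TX bufring first, RX bufring second.
+ *
+ *	ring_buf                  ring_buf + ring_size
+ *	|-------- txbr --------|-------- rxbr --------|
+ */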
+
+static int vmbus_uio_sysfs_read(const char *dir, const char *name,
+ unsigned long *val, unsigned long max_range)
+{
+ char path[PATH_MAX];
+ FILE *f;
+ int ret;
+
+ snprintf(path, sizeof(path), "%s/%s", dir, name);
+ f = fopen(path, "r");
+ if (!f) {
+		VMBUS_LOG(ERR, "can't open %s: %s",
+ path, strerror(errno));
+ return -errno;
+ }
+
+ if (fscanf(f, "%lu", val) != 1)
+ ret = -EIO;
+ else if (*val > max_range)
+ ret = -ERANGE;
+ else
+ ret = 0;
+ fclose(f);
+
+ return ret;
+}
+
+static bool vmbus_uio_ring_present(const struct rte_vmbus_device *dev,
+ uint32_t relid)
+{
+ char ring_path[PATH_MAX];
+
+ /* Check if kernel has subchannel sysfs files */
+ snprintf(ring_path, sizeof(ring_path),
+ "%s/%s/channels/%u/ring",
+ SYSFS_VMBUS_DEVICES, dev->device.name, relid);
+
+ return access(ring_path, R_OK|W_OK) == 0;
+}
+
+bool vmbus_uio_subchannels_supported(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan)
+{
+ return vmbus_uio_ring_present(dev, chan->relid);
+}
+
+static bool vmbus_isnew_subchannel(struct vmbus_channel *primary,
+ unsigned long id)
+{
+ const struct vmbus_channel *c;
+
+ STAILQ_FOREACH(c, &primary->subchannel_list, next) {
+ if (c->relid == id)
+ return false;
+ }
+ return true;
+}
+
+int vmbus_uio_get_subchan(struct vmbus_channel *primary,
+ struct vmbus_channel **subchan)
+{
+ const struct rte_vmbus_device *dev = primary->device;
+ char chan_path[PATH_MAX], subchan_path[PATH_MAX];
+ struct dirent *ent;
+ DIR *chan_dir;
+
+ snprintf(chan_path, sizeof(chan_path),
+ "%s/%s/channels",
+ SYSFS_VMBUS_DEVICES, dev->device.name);
+
+ chan_dir = opendir(chan_path);
+ if (!chan_dir) {
+ VMBUS_LOG(ERR, "cannot open %s: %s",
+ chan_path, strerror(errno));
+ return -errno;
+ }
+
+ while ((ent = readdir(chan_dir))) {
+ unsigned long relid, subid, monid;
+ char *endp;
+ int err;
+
+ if (ent->d_name[0] == '.')
+ continue;
+
+ errno = 0;
+ relid = strtoul(ent->d_name, &endp, 0);
+ if (*endp || errno != 0 || relid > UINT16_MAX) {
+ VMBUS_LOG(NOTICE, "not a valid channel relid: %s",
+ ent->d_name);
+ continue;
+ }
+
+ snprintf(subchan_path, sizeof(subchan_path), "%s/%lu",
+ chan_path, relid);
+ err = vmbus_uio_sysfs_read(subchan_path, "subchannel_id",
+ &subid, UINT16_MAX);
+ if (err) {
+ VMBUS_LOG(NOTICE, "invalid subchannel id %lu",
+ subid);
+ closedir(chan_dir);
+ return err;
+ }
+
+ if (subid == 0)
+ continue; /* skip primary channel */
+
+ if (!vmbus_isnew_subchannel(primary, relid))
+ continue;
+
+ if (!vmbus_uio_ring_present(dev, relid))
+ continue; /* Ring may not be ready yet */
+
+ err = vmbus_uio_sysfs_read(subchan_path, "monitor_id",
+ &monid, UINT8_MAX);
+		if (err) {
+			VMBUS_LOG(NOTICE, "invalid monitor id %lu",
+				monid);
+			closedir(chan_dir);
+			return err;
+		}
+
+ err = vmbus_chan_create(dev, relid, subid, monid, subchan);
+		if (err) {
+			VMBUS_LOG(NOTICE, "subchannel setup failed");
+			closedir(chan_dir);
+			return err;
+		}
+ break;
+ }
+ closedir(chan_dir);
+
+ return (ent == NULL) ? -ENOENT : 0;
+}
diff --git a/drivers/bus/vmbus/meson.build b/drivers/bus/vmbus/meson.build
new file mode 100644
index 00000000..18daabec
--- /dev/null
+++ b/drivers/bus/vmbus/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+allow_experimental_apis = true
+
+install_headers('rte_bus_vmbus.h','rte_vmbus_reg.h')
+
+sources = files('vmbus_common.c',
+ 'vmbus_channel.c',
+ 'vmbus_bufring.c',
+ 'vmbus_common_uio.c')
+
+if host_machine.system() == 'linux'
+ sources += files('linux/vmbus_bus.c',
+ 'linux/vmbus_uio.c')
+ includes += include_directories('linux')
+else
+ build = false
+endif
diff --git a/drivers/bus/vmbus/private.h b/drivers/bus/vmbus/private.h
new file mode 100644
index 00000000..9964fc42
--- /dev/null
+++ b/drivers/bus/vmbus/private.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_PRIVATE_H_
+#define _VMBUS_PRIVATE_H_
+
+#include <stdbool.h>
+#include <sys/uio.h>
+#include <rte_log.h>
+#include <rte_vmbus_reg.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+extern int vmbus_logtype_bus;
+#define VMBUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, vmbus_logtype_bus, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+struct vmbus_br {
+ struct vmbus_bufring *vbr;
+ uint32_t dsize;
+ uint32_t windex; /* next available location */
+};
+
+#define UIO_NAME_MAX 64
+
+struct vmbus_map {
+ void *addr; /* user mmap of resource */
+ uint64_t size; /* length */
+};
+
+/*
+ * For multi-process we need to reproduce all vmbus mappings in secondary
+ * processes, so save them in a tailq.
+ */
+struct mapped_vmbus_resource {
+ TAILQ_ENTRY(mapped_vmbus_resource) next;
+
+ rte_uuid_t id;
+ int nb_maps;
+ struct vmbus_map maps[VMBUS_MAX_RESOURCE];
+ char path[PATH_MAX];
+};
+
+TAILQ_HEAD(mapped_vmbus_res_list, mapped_vmbus_resource);
+
+#define HV_MON_TRIG_LEN 32
+#define HV_MON_TRIG_MAX 4
+
+struct vmbus_channel {
+ STAILQ_HEAD(, vmbus_channel) subchannel_list;
+ STAILQ_ENTRY(vmbus_channel) next;
+ const struct rte_vmbus_device *device;
+
+ struct vmbus_br rxbr;
+ struct vmbus_br txbr;
+
+ uint16_t relid;
+ uint16_t subchannel_id;
+ uint8_t monitor_id;
+};
+
+#define VMBUS_MAX_CHANNELS 64
+
+int vmbus_chan_create(const struct rte_vmbus_device *device,
+ uint16_t relid, uint16_t subid, uint8_t monitor_id,
+ struct vmbus_channel **new_chan);
+
+void vmbus_add_device(struct rte_vmbus_device *vmbus_dev);
+void vmbus_insert_device(struct rte_vmbus_device *exist_vmbus_dev,
+ struct rte_vmbus_device *new_vmbus_dev);
+void vmbus_remove_device(struct rte_vmbus_device *vmbus_device);
+
+void vmbus_uio_irq_control(struct rte_vmbus_device *dev, int32_t onoff);
+int vmbus_uio_irq_read(struct rte_vmbus_device *dev);
+
+int vmbus_uio_map_resource(struct rte_vmbus_device *dev);
+void vmbus_uio_unmap_resource(struct rte_vmbus_device *dev);
+
+int vmbus_uio_alloc_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource **uio_res);
+void vmbus_uio_free_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource *uio_res);
+
+struct mapped_vmbus_resource *
+vmbus_uio_find_resource(const struct rte_vmbus_device *dev);
+int vmbus_uio_map_resource_by_index(struct rte_vmbus_device *dev, int res_idx,
+ struct mapped_vmbus_resource *uio_res,
+ int flags);
+
+void *vmbus_map_resource(void *requested_addr, int fd, off_t offset,
+ size_t size, int additional_flags);
+void vmbus_unmap_resource(void *requested_addr, size_t size);
+
+bool vmbus_uio_subchannels_supported(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan);
+int vmbus_uio_get_subchan(struct vmbus_channel *primary,
+ struct vmbus_channel **subchan);
+int vmbus_uio_map_rings(struct vmbus_channel *chan);
+
+void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen);
+
+/* Amount of space available for write */
+static inline uint32_t
+vmbus_br_availwrite(const struct vmbus_br *br, uint32_t windex)
+{
+ uint32_t rindex = br->vbr->rindex;
+
+ if (windex >= rindex)
+ return br->dsize - (windex - rindex);
+ else
+ return rindex - windex;
+}
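+/*
+ * Worked example for the arithmetic above (numbers are illustrative):
+ * with dsize = 4096, rindex = 200 and windex = 100 the writer is
+ * behind the reader, so availwrite = 200 - 100 = 100 bytes; with
+ * windex = 300 the writer is ahead, so availwrite =
+ * 4096 - (300 - 200) = 3996 bytes.
+ */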
+
+static inline uint32_t
+vmbus_br_availread(const struct vmbus_br *br)
+{
+ return br->dsize - vmbus_br_availwrite(br, br->vbr->windex);
+}
+
+int vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen,
+ bool *need_sig);
+
+int vmbus_rxbr_peek(const struct vmbus_br *rbr, void *data, size_t dlen);
+
+int vmbus_rxbr_read(struct vmbus_br *rbr, void *data, size_t dlen, size_t hlen);
+
+#endif /* _VMBUS_PRIVATE_H_ */
diff --git a/drivers/bus/vmbus/rte_bus_vmbus.h b/drivers/bus/vmbus/rte_bus_vmbus.h
new file mode 100644
index 00000000..4a2c1f6f
--- /dev/null
+++ b/drivers/bus/vmbus/rte_bus_vmbus.h
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_H_
+#define _VMBUS_H_
+
+/**
+ * @file
+ *
+ * VMBUS Interface
+ */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_compat.h>
+#include <rte_uuid.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_dev.h>
+#include <rte_vmbus_reg.h>
+
+/* Forward declarations */
+struct rte_vmbus_device;
+struct rte_vmbus_driver;
+struct rte_vmbus_bus;
+struct vmbus_channel;
+struct vmbus_mon_page;
+
+TAILQ_HEAD(rte_vmbus_device_list, rte_vmbus_device);
+TAILQ_HEAD(rte_vmbus_driver_list, rte_vmbus_driver);
+
+/* VMBus iterators */
+#define FOREACH_DEVICE_ON_VMBUS(p) \
+ TAILQ_FOREACH(p, &(rte_vmbus_bus.device_list), next)
+
+#define FOREACH_DRIVER_ON_VMBUS(p) \
+ TAILQ_FOREACH(p, &(rte_vmbus_bus.driver_list), next)
+
+/** Indices of the VMBUS UIO resource maps. */
+enum hv_uio_map {
+ HV_TXRX_RING_MAP = 0,
+ HV_INT_PAGE_MAP,
+ HV_MON_PAGE_MAP,
+ HV_RECV_BUF_MAP,
+ HV_SEND_BUF_MAP
+};
+#define VMBUS_MAX_RESOURCE 5
+
+/**
+ * A structure describing a VMBUS device.
+ */
+struct rte_vmbus_device {
+ TAILQ_ENTRY(rte_vmbus_device) next; /**< Next probed VMBUS device */
+ const struct rte_vmbus_driver *driver; /**< Associated driver */
+ struct rte_device device; /**< Inherit core device */
+ rte_uuid_t device_id; /**< VMBUS device id */
+ rte_uuid_t class_id; /**< VMBUS device type */
+ uint32_t relid; /**< id for primary */
+ uint8_t monitor_id; /**< monitor page */
+ int uio_num; /**< UIO device number */
+ uint32_t *int_page; /**< VMBUS interrupt page */
+ struct vmbus_channel *primary; /**< VMBUS primary channel */
+ struct vmbus_mon_page *monitor_page; /**< VMBUS monitor page */
+
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_mem_resource resource[VMBUS_MAX_RESOURCE];
+};
+
+/**
+ * Initialization function for the driver called during VMBUS probing.
+ */
+typedef int (vmbus_probe_t)(struct rte_vmbus_driver *,
+ struct rte_vmbus_device *);
+
+/**
+ * Uninitialization function for the driver, called during hot unplugging.
+ */
+typedef int (vmbus_remove_t)(struct rte_vmbus_device *);
+
+/**
+ * A structure describing a VMBUS driver.
+ */
+struct rte_vmbus_driver {
+ TAILQ_ENTRY(rte_vmbus_driver) next; /**< Next in list. */
+ struct rte_driver driver;
+ struct rte_vmbus_bus *bus; /**< VM bus reference. */
+ vmbus_probe_t *probe; /**< Device Probe function. */
+ vmbus_remove_t *remove; /**< Device Remove function. */
+
+ const rte_uuid_t *id_table; /**< ID table. */
+};
+
+
+/**
+ * Structure describing the VM bus
+ */
+struct rte_vmbus_bus {
+ struct rte_bus bus; /**< Inherit the generic class */
+ struct rte_vmbus_device_list device_list; /**< List of devices */
+ struct rte_vmbus_driver_list driver_list; /**< List of drivers */
+};
+
+/**
+ * Scan the content of the VMBUS, and add the discovered devices to the
+ * devices list.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_vmbus_scan(void);
+
+/**
+ * Probe the VMBUS bus
+ *
+ * @return
+ * - 0 on success.
+ * - !0 on error.
+ */
+int rte_vmbus_probe(void);
+
+/**
+ * Map the VMBUS device resources in user space virtual memory address
+ *
+ * @param dev
+ * A pointer to a rte_vmbus_device structure describing the device
+ * to use
+ *
+ * @return
+ * 0 on success, negative on error, and positive if the device is
+ * not managed by a UIO kernel driver.
+ */
+int rte_vmbus_map_device(struct rte_vmbus_device *dev);
+
+/**
+ * Unmap this device
+ *
+ * @param dev
+ * A pointer to a rte_vmbus_device structure describing the device
+ * to use
+ */
+void rte_vmbus_unmap_device(struct rte_vmbus_device *dev);
+
+/**
+ * Get connection to primary VMBUS channel
+ *
+ * @param device
+ * A pointer to a rte_vmbus_device structure describing the device
+ * @param chan
+ * A pointer to a VMBUS channel pointer that will be filled.
+ * @return
+ * - 0 Success; channel opened.
+ * - -ENOMEM: Not enough memory available.
+ * - -EINVAL: Regions could not be mapped.
+ */
+int rte_vmbus_chan_open(struct rte_vmbus_device *device,
+ struct vmbus_channel **chan);
+
+/**
+ * Free connection to VMBUS channel
+ *
+ * @param chan
+ * VMBUS channel
+ */
+void rte_vmbus_chan_close(struct vmbus_channel *chan);
+
+/**
+ * Gets the maximum number of channels supported on device
+ *
+ * @param device
+ * A pointer to a rte_vmbus_device structure describing the device
+ * @return
+ * Number of channels available.
+ */
+int rte_vmbus_max_channels(const struct rte_vmbus_device *device);
+
+/**
+ * Get a connection to new secondary vmbus channel
+ *
+ * @param primary
+ * A pointer to primary VMBUS channel
+ * @param chan
+ * A pointer to a secondary VMBUS channel pointer that will be filled.
+ * @return
+ * - 0 Success; channel opened.
+ * - -ENOMEM: Not enough memory available.
+ * - -EINVAL: Regions could not be mapped.
+ */
+int rte_vmbus_subchan_open(struct vmbus_channel *primary,
+ struct vmbus_channel **new_chan);
+
+/**
+ * Disable IRQ for device
+ *
+ * @param device
+ * VMBUS device
+ */
+void rte_vmbus_irq_mask(struct rte_vmbus_device *device);
+
+/**
+ * Enable IRQ for device
+ *
+ * @param device
+ * VMBUS device
+ */
+void rte_vmbus_irq_unmask(struct rte_vmbus_device *device);
+
+/**
+ * Read (and wait) for IRQ
+ *
+ * @param device
+ * VMBUS device
+ * @return
+ * Interrupt count read, or negative errno on failure.
+ */
+int rte_vmbus_irq_read(struct rte_vmbus_device *device);
+
+/**
+ * Test if channel is empty
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @return
+ * Return true if no data present in incoming ring.
+ */
+bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel);
+
+/**
+ * Send the specified buffer on the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param type
+ * Type of packet being sent, e.g. negotiate, time
+ * packet etc.
+ * @param data
+ * Pointer to the buffer to send
+ * @param dlen
+ * Number of bytes of data to send
+ * @param xact
+ * Identifier of the request
+ * @param flags
+ * Message type inband, rxbuf, gpa
+ * @param need_sig
+ * If non-NULL, set when the host must be signaled by the caller;
+ * if NULL, the host is signaled immediately when needed (optional)
+ *
+ * Sends the data in the buffer directly to Hyper-V via the vmbus
+ */
+int rte_vmbus_chan_send(struct vmbus_channel *channel, uint16_t type,
+ void *data, uint32_t dlen,
+ uint64_t xact, uint32_t flags, bool *need_sig);
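+/*
+ * Usage sketch (names and values are illustrative, not part of this
+ * API): batch several sends, then signal the host once at the end.
+ *
+ *	bool need_sig = false;
+ *	int i;
+ *
+ *	for (i = 0; i < n; i++)
+ *		rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
+ *				    req[i], req_len, xactid + i,
+ *				    VMBUS_CHANPKT_FLAG_RC, &need_sig);
+ *	if (need_sig)
+ *		rte_vmbus_chan_signal_tx(chan);
+ */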
+
+/**
+ * Explicitly signal host that data is available
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ *
+ * Used when batching multiple sends and only signaling host
+ * after the last send.
+ */
+void rte_vmbus_chan_signal_tx(const struct vmbus_channel *channel);
+
+/* Structure for scatter/gather I/O */
+struct iova_list {
+ rte_iova_t addr;
+ uint32_t len;
+};
+#define MAX_PAGE_BUFFER_COUNT 32
+
+/**
+ * Send a scattered buffer on the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param type
+ * Type of packet being sent, e.g. negotiate, time
+ * packet etc.
+ * @param gpa
+ * Array of buffers to send
+ * @param gpacnt
+ * Number of elements in the gpa array
+ * @param data
+ * Pointer to additional data to send
+ * @param dlen
+ * Number of bytes of additional data to send
+ * @param xact
+ * Identifier of the request
+ * @param flags
+ * Message type inband, rxbuf, gpa
+ * @param need_sig
+ * If non-NULL, set when the host must be signaled by the caller;
+ * if NULL, the host is signaled immediately when needed (optional)
+ *
+ * Sends the data in the buffer directly to Hyper-V via the vmbus
+ */
+int rte_vmbus_chan_send_sglist(struct vmbus_channel *channel,
+ struct vmbus_gpa gpa[], uint32_t gpacnt,
+ void *data, uint32_t dlen,
+ uint64_t xact, bool *need_sig);
+/**
+ * Receive response to request on the given channel
+ * skips the channel header.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param data
+ * Pointer to the buffer you want to receive the data into.
+ * @param len
+ * Pointer to size of receive buffer (in/out)
+ * @param request_id
+ * Pointer to the received transaction id
+ * @return
+ * On success, returns 0
+ * On failure, returns negative errno.
+ */
+int rte_vmbus_chan_recv(struct vmbus_channel *chan,
+ void *data, uint32_t *len,
+ uint64_t *request_id);
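+/*
+ * Minimal receive-loop sketch (buffer size and the handler are
+ * hypothetical):
+ *
+ *	uint8_t buf[2048];
+ *	uint32_t len;
+ *	uint64_t xactid;
+ *
+ *	while (!rte_vmbus_chan_rx_empty(chan)) {
+ *		len = sizeof(buf);
+ *		if (rte_vmbus_chan_recv(chan, buf, &len, &xactid) != 0)
+ *			break;
+ *		handle(buf, len);
+ *	}
+ */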
+
+/**
+ * Receive response to request on the given channel
+ * includes the channel header.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param data
+ * Pointer to the buffer you want to receive the data into.
+ * @param len
+ * Pointer to size of receive buffer (in/out)
+ * @return
+ * On success, returns number of bytes read.
+ * On failure, returns negative errno.
+ */
+int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan,
+ void *data, uint32_t *len);
+
+/**
+ * Notify host of bytes read (after recv_raw)
+ * Signals host if required.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param bytes_read
+ * Number of bytes read since last signal
+ */
+void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read);
+
+/**
+ * Determine sub channel index of the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @return
+ * Sub channel index (0 for primary)
+ */
+uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan);
+
+/**
+ * Register a VMBUS driver.
+ *
+ * @param driver
+ * A pointer to a rte_vmbus_driver structure describing the driver
+ * to be registered.
+ */
+void rte_vmbus_register(struct rte_vmbus_driver *driver);
+
+/**
+ * For debugging, dump the contents of the ring buffer.
+ *
+ * @param f
+ * File to dump to.
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ */
+void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan);
+
+/**
+ * Unregister a VMBUS driver.
+ *
+ * @param driver
+ * A pointer to a rte_vmbus_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_vmbus_unregister(struct rte_vmbus_driver *driver);
+
+/** Helper for VMBUS device registration from driver instance */
+#define RTE_PMD_REGISTER_VMBUS(nm, vmbus_drv) \
+ RTE_INIT(vmbusinitfn_ ##nm); \
+ static void vmbusinitfn_ ##nm(void) \
+ { \
+ (vmbus_drv).driver.name = RTE_STR(nm); \
+ rte_vmbus_register(&vmbus_drv); \
+ } \
+ RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
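+/*
+ * Registration sketch (driver name and callbacks are hypothetical):
+ *
+ *	static struct rte_vmbus_driver my_vmbus_drv = {
+ *		.id_table = my_id_table,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *	};
+ *	RTE_PMD_REGISTER_VMBUS(net_my_vmbus, my_vmbus_drv);
+ */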
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VMBUS_H_ */
diff --git a/drivers/bus/vmbus/rte_bus_vmbus_version.map b/drivers/bus/vmbus/rte_bus_vmbus_version.map
new file mode 100644
index 00000000..dabb9203
--- /dev/null
+++ b/drivers/bus/vmbus/rte_bus_vmbus_version.map
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+DPDK_18.08 {
+ global:
+
+ rte_vmbus_chan_close;
+ rte_vmbus_chan_open;
+ rte_vmbus_chan_recv;
+ rte_vmbus_chan_recv_raw;
+ rte_vmbus_chan_rx_empty;
+ rte_vmbus_chan_send;
+ rte_vmbus_chan_send_sglist;
+ rte_vmbus_chan_signal_read;
+ rte_vmbus_chan_signal_tx;
+ rte_vmbus_irq_mask;
+ rte_vmbus_irq_read;
+ rte_vmbus_irq_unmask;
+ rte_vmbus_map_device;
+ rte_vmbus_max_channels;
+ rte_vmbus_probe;
+ rte_vmbus_register;
+ rte_vmbus_scan;
+ rte_vmbus_sub_channel_index;
+ rte_vmbus_subchan_open;
+ rte_vmbus_unmap_device;
+ rte_vmbus_unregister;
+
+ local: *;
+};
diff --git a/drivers/bus/vmbus/rte_vmbus_reg.h b/drivers/bus/vmbus/rte_vmbus_reg.h
new file mode 100644
index 00000000..f5a0693d
--- /dev/null
+++ b/drivers/bus/vmbus/rte_vmbus_reg.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_REG_H_
+#define _VMBUS_REG_H_
+
+/*
+ * Hyper-V SynIC message format.
+ */
+#define VMBUS_MSG_DSIZE_MAX 240
+#define VMBUS_MSG_SIZE 256
+
+struct vmbus_message {
+ uint32_t type; /* HYPERV_MSGTYPE_ */
+ uint8_t dsize; /* data size */
+ uint8_t flags; /* VMBUS_MSGFLAG_ */
+ uint16_t rsvd;
+ uint64_t id;
+ uint8_t data[VMBUS_MSG_DSIZE_MAX];
+} __rte_packed;
+
+#define VMBUS_MSGFLAG_PENDING 0x01
+
+/*
+ * Hyper-V Monitor Notification Facility
+ */
+
+struct vmbus_mon_trig {
+ uint32_t pending;
+ uint32_t armed;
+} __rte_packed;
+
+#define VMBUS_MONTRIGS_MAX 4
+#define VMBUS_MONTRIG_LEN 32
+
+/*
+ * Hyper-V Monitor Notification Facility
+ */
+struct hyperv_mon_param {
+ uint32_t connid;
+ uint16_t evtflag_ofs;
+ uint16_t rsvd;
+} __rte_packed;
+
+struct vmbus_mon_page {
+ uint32_t state;
+ uint32_t rsvd1;
+
+ struct vmbus_mon_trig trigs[VMBUS_MONTRIGS_MAX];
+ uint8_t rsvd2[536];
+
+ uint16_t lat[VMBUS_MONTRIGS_MAX][VMBUS_MONTRIG_LEN];
+ uint8_t rsvd3[256];
+
+ struct hyperv_mon_param
+ param[VMBUS_MONTRIGS_MAX][VMBUS_MONTRIG_LEN];
+ uint8_t rsvd4[1984];
+} __rte_packed;
+
+/*
+ * Buffer ring
+ */
+
+struct vmbus_bufring {
+ volatile uint32_t windex;
+ volatile uint32_t rindex;
+
+ /*
+ * Interrupt mask {0,1}
+ *
+	 * For TX bufring, the host sets this to 1 when it is processing
+ * the TX bufring, so that we can safely skip the TX event
+ * notification to host.
+ *
+ * For RX bufring, once this is set to 1 by us, host will not
+ * further dispatch interrupts to us, even if there are data
+ * pending on the RX bufring. This effectively disables the
+ * interrupt of the channel to which this RX bufring is attached.
+ */
+ volatile uint32_t imask;
+
+ /*
+ * Win8 uses some of the reserved bits to implement
+ * interrupt driven flow management. On the send side
+ * we can request that the receiver interrupt the sender
+ * when the ring transitions from being full to being able
+ * to handle a message of size "pending_send_sz".
+ *
+ * Add necessary state for this enhancement.
+ */
+ volatile uint32_t pending_send;
+ uint32_t reserved1[12];
+
+ union {
+ struct {
+ uint32_t feat_pending_send_sz:1;
+ };
+ uint32_t value;
+ } feature_bits;
+
+ /* Pad it to PAGE_SIZE so that data starts on page boundary */
+ uint8_t reserved2[4028];
+
+ /*
+ * Ring data starts here + RingDataStartOffset
+ * !!! DO NOT place any fields below this !!!
+ */
+ uint8_t data[0];
+} __rte_packed;
+
+/*
+ * Channel packets
+ */
+
+/* Channel packet flags */
+#define VMBUS_CHANPKT_TYPE_INBAND 0x0006
+#define VMBUS_CHANPKT_TYPE_RXBUF 0x0007
+#define VMBUS_CHANPKT_TYPE_GPA 0x0009
+#define VMBUS_CHANPKT_TYPE_COMP 0x000b
+
+#define VMBUS_CHANPKT_FLAG_NONE 0
+#define VMBUS_CHANPKT_FLAG_RC 0x0001 /* report completion */
+
+#define VMBUS_CHANPKT_SIZE_SHIFT 3
+#define VMBUS_CHANPKT_SIZE_ALIGN (1 << VMBUS_CHANPKT_SIZE_SHIFT)
+#define VMBUS_CHANPKT_HLEN_MIN \
+ (sizeof(struct vmbus_chanpkt_hdr) >> VMBUS_CHANPKT_SIZE_SHIFT)
+
+static inline uint32_t
+vmbus_chanpkt_getlen(uint16_t pktlen)
+{
+ return (uint32_t)pktlen << VMBUS_CHANPKT_SIZE_SHIFT;
+}
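+/*
+ * Example: hlen and tlen are carried in 8-byte units, so a header of
+ * sizeof(struct vmbus_chanpkt_hdr) == 16 bytes is encoded as hlen = 2,
+ * and vmbus_chanpkt_getlen(2) converts it back to 16 bytes.
+ */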
+
+/*
+ * GPA stuffs.
+ */
+struct vmbus_gpa_range {
+ uint32_t len;
+ uint32_t ofs;
+ uint64_t page[0];
+} __rte_packed;
+
+/* This is actually vmbus_gpa_range.gpa_page[1] */
+struct vmbus_gpa {
+ uint32_t len;
+ uint32_t ofs;
+ uint64_t page;
+} __rte_packed;
+
+struct vmbus_chanpkt_hdr {
+ uint16_t type; /* VMBUS_CHANPKT_TYPE_ */
+ uint16_t hlen; /* header len, in 8 bytes */
+ uint16_t tlen; /* total len, in 8 bytes */
+ uint16_t flags; /* VMBUS_CHANPKT_FLAG_ */
+ uint64_t xactid;
+} __rte_packed;
+
+static inline uint32_t
+vmbus_chanpkt_datalen(const struct vmbus_chanpkt_hdr *pkt)
+{
+ return vmbus_chanpkt_getlen(pkt->tlen)
+ - vmbus_chanpkt_getlen(pkt->hlen);
+}
+
+struct vmbus_chanpkt {
+ struct vmbus_chanpkt_hdr hdr;
+} __rte_packed;
+
+struct vmbus_rxbuf_desc {
+ uint32_t len;
+ uint32_t ofs;
+} __rte_packed;
+
+struct vmbus_chanpkt_rxbuf {
+ struct vmbus_chanpkt_hdr hdr;
+ uint16_t rxbuf_id;
+ uint16_t rsvd;
+ uint32_t rxbuf_cnt;
+ struct vmbus_rxbuf_desc rxbuf[];
+} __rte_packed;
+
+struct vmbus_chanpkt_sglist {
+ struct vmbus_chanpkt_hdr hdr;
+ uint32_t rsvd;
+ uint32_t gpa_cnt;
+ struct vmbus_gpa gpa[];
+} __rte_packed;
+
+/*
+ * Channel messages
+ * - Embedded in vmbus_message.msg_data, e.g. response and notification.
+ * - Embedded in hypercall_postmsg_in.hc_data, e.g. request.
+ */
+
+#define VMBUS_CHANMSG_TYPE_CHOFFER 1 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHRESCIND 2 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHREQUEST 3 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CHOFFER_DONE 4 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHOPEN 5 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CHOPEN_RESP 6 /* RESP */
+#define VMBUS_CHANMSG_TYPE_CHCLOSE 7 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_CONN 8 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_SUBCONN 9 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_CONNRESP 10 /* RESP */
+#define VMBUS_CHANMSG_TYPE_GPADL_DISCONN 11 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_DISCONNRESP 12 /* RESP */
+#define VMBUS_CHANMSG_TYPE_CHFREE 13 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CONNECT 14 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CONNECT_RESP 15 /* RESP */
+#define VMBUS_CHANMSG_TYPE_DISCONNECT 16 /* REQ */
+#define VMBUS_CHANMSG_TYPE_MAX 22
+
+struct vmbus_chanmsg_hdr {
+ uint32_t type; /* VMBUS_CHANMSG_TYPE_ */
+ uint32_t rsvd;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CONNECT */
+struct vmbus_chanmsg_connect {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t ver;
+ uint32_t rsvd;
+ uint64_t evtflags;
+ uint64_t mnf1;
+ uint64_t mnf2;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CONNECT_RESP */
+struct vmbus_chanmsg_connect_resp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint8_t done;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHREQUEST */
+struct vmbus_chanmsg_chrequest {
+ struct vmbus_chanmsg_hdr hdr;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_DISCONNECT */
+struct vmbus_chanmsg_disconnect {
+ struct vmbus_chanmsg_hdr hdr;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOPEN */
+struct vmbus_chanmsg_chopen {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t openid;
+ uint32_t gpadl;
+ uint32_t vcpuid;
+ uint32_t txbr_pgcnt;
+#define VMBUS_CHANMSG_CHOPEN_UDATA_SIZE 120
+ uint8_t udata[VMBUS_CHANMSG_CHOPEN_UDATA_SIZE];
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOPEN_RESP */
+struct vmbus_chanmsg_chopen_resp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t openid;
+ uint32_t status;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_GPADL_CONN */
+struct vmbus_chanmsg_gpadl_conn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+ uint16_t range_len;
+ uint16_t range_cnt;
+ struct vmbus_gpa_range range;
+} __rte_packed;
+
+#define VMBUS_CHANMSG_GPADL_CONN_PGMAX 26
+
+/* VMBUS_CHANMSG_TYPE_GPADL_SUBCONN */
+struct vmbus_chanmsg_gpadl_subconn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t msgno;
+ uint32_t gpadl;
+ uint64_t gpa_page[];
+} __rte_packed;
+
+#define VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX 28
+
+/* VMBUS_CHANMSG_TYPE_GPADL_CONNRESP */
+struct vmbus_chanmsg_gpadl_connresp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+ uint32_t status;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHCLOSE */
+struct vmbus_chanmsg_chclose {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_GPADL_DISCONN */
+struct vmbus_chanmsg_gpadl_disconn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHFREE */
+struct vmbus_chanmsg_chfree {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHRESCIND */
+struct vmbus_chanmsg_chrescind {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOFFER */
+struct vmbus_chanmsg_choffer {
+ struct vmbus_chanmsg_hdr hdr;
+ rte_uuid_t chtype;
+ rte_uuid_t chinst;
+ uint64_t chlat; /* unit: 100ns */
+ uint32_t chrev;
+ uint32_t svrctx_sz;
+ uint16_t chflags;
+ uint16_t mmio_sz; /* unit: MB */
+ uint8_t udata[120];
+ uint16_t subidx;
+ uint16_t rsvd;
+ uint32_t chanid;
+ uint8_t montrig;
+ uint8_t flags1; /* VMBUS_CHOFFER_FLAG1_ */
+ uint16_t flags2;
+ uint32_t connid;
+} __rte_packed;
+
+#define VMBUS_CHOFFER_FLAG1_HASMNF 0x01
+
+#endif /* !_VMBUS_REG_H_ */
diff --git a/drivers/bus/vmbus/vmbus_bufring.c b/drivers/bus/vmbus/vmbus_bufring.c
new file mode 100644
index 00000000..c8800160
--- /dev/null
+++ b/drivers/bus/vmbus/vmbus_bufring.c
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2009-2012,2016 Microsoft Corp.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/uio.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_pause.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+/* Increase bufring index by inc with wraparound */
+static inline uint32_t vmbus_br_idxinc(uint32_t idx, uint32_t inc, uint32_t sz)
+{
+ idx += inc;
+ if (idx >= sz)
+ idx -= sz;
+
+ return idx;
+}
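+/*
+ * Worked example (numbers are illustrative): for a ring of sz = 4096,
+ * vmbus_br_idxinc(4090, 16, 4096) computes 4106, detects it is past
+ * the end and wraps to 4106 - 4096 = 10.
+ */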
+
+void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen)
+{
+ br->vbr = buf;
+ br->windex = br->vbr->windex;
+ br->dsize = blen - sizeof(struct vmbus_bufring);
+}
+
+/*
+ * When we write to the ring buffer, check if the host needs to be
+ * signaled.
+ *
+ * The contract:
+ * - The host guarantees that while it is draining the TX bufring,
+ * it will set the br_imask to indicate it does not need to be
+ * interrupted when new data are added.
+ * - The host guarantees that it will completely drain the TX bufring
+ * before exiting the read loop. Further, once the TX bufring is
+ * empty, it will clear the br_imask and re-check to see if new
+ * data have arrived.
+ */
+static inline bool
+vmbus_txbr_need_signal(const struct vmbus_br *tbr, uint32_t old_windex)
+{
+ rte_smp_mb();
+ if (tbr->vbr->imask)
+ return false;
+
+ rte_smp_rmb();
+
+ /*
+ * This is the only case we need to signal when the
+ * ring transitions from being empty to non-empty.
+ */
+ return old_windex == tbr->vbr->rindex;
+}
+
+static inline uint32_t
+vmbus_txbr_copyto(const struct vmbus_br *tbr, uint32_t windex,
+ const void *src0, uint32_t cplen)
+{
+ uint8_t *br_data = tbr->vbr->data;
+ uint32_t br_dsize = tbr->dsize;
+ const uint8_t *src = src0;
+
+ /* XXX use double mapping like Linux kernel? */
+ if (cplen > br_dsize - windex) {
+ uint32_t fraglen = br_dsize - windex;
+
+ /* Wrap-around detected */
+ memcpy(br_data + windex, src, fraglen);
+ memcpy(br_data, src + fraglen, cplen - fraglen);
+ } else {
+ memcpy(br_data + windex, src, cplen);
+ }
+
+ return vmbus_br_idxinc(windex, cplen, br_dsize);
+}
+
+/*
+ * Write scattered channel packet to TX bufring.
+ *
+ * The offset of this channel packet is written as a 64-bit value
+ * immediately after this channel packet.
+ *
+ * The write goes through three stages:
+ * 1. Reserve space in ring buffer for the new data.
+ * Writer atomically moves priv_write_index.
+ * 2. Copy the new data into the ring.
+ * 3. Update the tail of the ring (visible to host) that indicates
+ * next read location. Writer updates write_index
+ */
+int
+vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen,
+ bool *need_sig)
+{
+ struct vmbus_bufring *vbr = tbr->vbr;
+ uint32_t ring_size = tbr->dsize;
+ uint32_t old_windex, next_windex, windex, total;
+ uint64_t save_windex;
+ int i;
+
+ total = 0;
+ for (i = 0; i < iovlen; i++)
+ total += iov[i].iov_len;
+ total += sizeof(save_windex);
+
+ /* Reserve space in ring */
+ do {
+ uint32_t avail;
+
+ /* Get current free location */
+ old_windex = tbr->windex;
+
+ /* Prevent compiler reordering this with calculation */
+ rte_compiler_barrier();
+
+ avail = vmbus_br_availwrite(tbr, old_windex);
+
+ /* If not enough space in ring, then tell caller. */
+ if (avail <= total)
+ return -EAGAIN;
+
+ next_windex = vmbus_br_idxinc(old_windex, total, ring_size);
+
+ /* Atomic update of next write_index for other threads */
+ } while (!rte_atomic32_cmpset(&tbr->windex, old_windex, next_windex));
+
+ /* Space from old..new is now reserved */
+ windex = old_windex;
+ for (i = 0; i < iovlen; i++) {
+ windex = vmbus_txbr_copyto(tbr, windex,
+ iov[i].iov_base, iov[i].iov_len);
+ }
+
+ /* Set the offset of the current channel packet. */
+ save_windex = ((uint64_t)old_windex) << 32;
+ windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
+ sizeof(save_windex));
+
+ /* The region reserved should match region used */
+ RTE_ASSERT(windex == next_windex);
+
+ /* Ensure that data is available before updating host index */
+ rte_smp_wmb();
+
+	/* Check in our reservation; wait for our turn to update the host */
+ while (!rte_atomic32_cmpset(&vbr->windex, old_windex, next_windex))
+ rte_pause();
+
+ /* If host had read all data before this, then need to signal */
+ *need_sig |= vmbus_txbr_need_signal(tbr, old_windex);
+ return 0;
+}
+
+static inline uint32_t
+vmbus_rxbr_copyfrom(const struct vmbus_br *rbr, uint32_t rindex,
+ void *dst0, size_t cplen)
+{
+ const uint8_t *br_data = rbr->vbr->data;
+ uint32_t br_dsize = rbr->dsize;
+ uint8_t *dst = dst0;
+
+ if (cplen > br_dsize - rindex) {
+ uint32_t fraglen = br_dsize - rindex;
+
+ /* Wrap-around detected. */
+ memcpy(dst, br_data + rindex, fraglen);
+ memcpy(dst + fraglen, br_data, cplen - fraglen);
+ } else {
+ memcpy(dst, br_data + rindex, cplen);
+ }
+
+ return vmbus_br_idxinc(rindex, cplen, br_dsize);
+}
+
+/* Copy data from receive ring but don't change index */
+int
+vmbus_rxbr_peek(const struct vmbus_br *rbr, void *data, size_t dlen)
+{
+ uint32_t avail;
+
+ /*
+	 * At least the requested data plus the 64-bit channel packet
+	 * offset must be available.
+ */
+ avail = vmbus_br_availread(rbr);
+ if (avail < dlen + sizeof(uint64_t))
+ return -EAGAIN;
+
+ vmbus_rxbr_copyfrom(rbr, rbr->vbr->rindex, data, dlen);
+ return 0;
+}
+
+/*
+ * Copy data from receive ring and change index
+ * NOTE:
+ * We assume (dlen + skip) == sizeof(channel packet).
+ */
+int
+vmbus_rxbr_read(struct vmbus_br *rbr, void *data, size_t dlen, size_t skip)
+{
+ struct vmbus_bufring *vbr = rbr->vbr;
+ uint32_t br_dsize = rbr->dsize;
+ uint32_t rindex;
+
+ if (vmbus_br_availread(rbr) < dlen + skip + sizeof(uint64_t))
+ return -EAGAIN;
+
+ /* Record where host was when we started read (for debug) */
+ rbr->windex = rbr->vbr->windex;
+
+ /*
+ * Copy channel packet from RX bufring.
+ */
+ rindex = vmbus_br_idxinc(rbr->vbr->rindex, skip, br_dsize);
+ rindex = vmbus_rxbr_copyfrom(rbr, rindex, data, dlen);
+
+ /*
+	 * Discard this channel packet's 64-bit offset, which is useless to us.
+ */
+ rindex = vmbus_br_idxinc(rindex, sizeof(uint64_t), br_dsize);
+
+ /* Update the read index _after_ the channel packet is fetched. */
+ rte_compiler_barrier();
+
+ vbr->rindex = rindex;
+
+ return 0;
+}
diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
new file mode 100644
index 00000000..cc5f3e83
--- /dev/null
+++ b/drivers/bus/vmbus/vmbus_channel.c
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/uio.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+static inline void
+vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
+{
+	/* Use GCC builtin which does an atomic OR operation */
+ __sync_or_and_fetch(addr, mask);
+}
+
+static inline void
+vmbus_send_interrupt(const struct rte_vmbus_device *dev, uint32_t relid)
+{
+ uint32_t *int_addr;
+ uint32_t int_mask;
+
+ int_addr = dev->int_page + relid / 32;
+ int_mask = 1u << (relid % 32);
+
+ vmbus_sync_set_bit(int_addr, int_mask);
+}
+
+static inline void
+vmbus_set_monitor(const struct rte_vmbus_device *dev, uint32_t monitor_id)
+{
+ uint32_t *monitor_addr, monitor_mask;
+ unsigned int trigger_index;
+
+ trigger_index = monitor_id / HV_MON_TRIG_LEN;
+ monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
+
+ monitor_addr = &dev->monitor_page->trigs[trigger_index].pending;
+ vmbus_sync_set_bit(monitor_addr, monitor_mask);
+}
+
+static void
+vmbus_set_event(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan)
+{
+ vmbus_send_interrupt(dev, chan->relid);
+ vmbus_set_monitor(dev, chan->monitor_id);
+}
+
+/*
+ * Notify host that there are data pending on our TX bufring.
+ *
+ * Since this in userspace, rely on the monitor page.
+ * Can't do a hypercall from userspace.
+ */
+void
+rte_vmbus_chan_signal_tx(const struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *dev = chan->device;
+ const struct vmbus_br *tbr = &chan->txbr;
+
+ /* Make sure all updates are done before signaling host */
+ rte_smp_wmb();
+
+	/* If the host is ignoring interrupts, don't signal */
+ if (tbr->vbr->imask)
+ return;
+
+ vmbus_set_event(dev, chan);
+}
+
+
+/* Do a simple send directly using transmit ring. */
+int rte_vmbus_chan_send(struct vmbus_channel *chan, uint16_t type,
+ void *data, uint32_t dlen,
+ uint64_t xactid, uint32_t flags, bool *need_sig)
+{
+ struct vmbus_chanpkt pkt;
+ unsigned int pktlen, pad_pktlen;
+ const uint32_t hlen = sizeof(pkt);
+ bool send_evt = false;
+ uint64_t pad = 0;
+ struct iovec iov[3];
+ int error;
+
+ pktlen = hlen + dlen;
+ pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));
+
+ pkt.hdr.type = type;
+ pkt.hdr.flags = flags;
+ pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.xactid = xactid;
+
+ iov[0].iov_base = &pkt;
+ iov[0].iov_len = hlen;
+ iov[1].iov_base = data;
+ iov[1].iov_len = dlen;
+ iov[2].iov_base = &pad;
+ iov[2].iov_len = pad_pktlen - pktlen;
+
+ error = vmbus_txbr_write(&chan->txbr, iov, 3, &send_evt);
+
+ /*
+ * The caller passes a non-NULL need_sig if it will handle
+ * any required signaling later (e.g. when batching sends).
+ * If need_sig is NULL, signal now when needed.
+ */
+ if (need_sig)
+ *need_sig |= send_evt;
+ else if (error == 0 && send_evt)
+ rte_vmbus_chan_signal_tx(chan);
+ return error;
+}
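A minimal usage sketch of the batching contract described above. The caller state (chan, n_pkts, pkts[]) is hypothetical, and the packet type and flag are assumed to be the VMBUS_CHANPKT_TYPE_INBAND and VMBUS_CHANPKT_FLAG_RC constants from rte_vmbus_reg.h:

	/* Batch several sends, then ring the doorbell once. */
	bool need_sig = false;
	unsigned int i;
	int err = 0;

	for (i = 0; i < n_pkts && err == 0; i++)
		err = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
					  pkts[i].data, pkts[i].len,
					  (uint64_t)i, VMBUS_CHANPKT_FLAG_RC,
					  &need_sig);

	if (err == 0 && need_sig)
		rte_vmbus_chan_signal_tx(chan);	/* one notification for all */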
+
+/* Do a scatter/gather send where the descriptor points to data. */
+int rte_vmbus_chan_send_sglist(struct vmbus_channel *chan,
+ struct vmbus_gpa sg[], uint32_t sglen,
+ void *data, uint32_t dlen,
+ uint64_t xactid, bool *need_sig)
+{
+ struct vmbus_chanpkt_sglist pkt;
+ unsigned int pktlen, pad_pktlen, hlen;
+ bool send_evt = false;
+ struct iovec iov[4];
+ uint64_t pad = 0;
+ int error;
+
+ hlen = offsetof(struct vmbus_chanpkt_sglist, gpa[sglen]);
+ pktlen = hlen + dlen;
+ pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));
+
+ pkt.hdr.type = VMBUS_CHANPKT_TYPE_GPA;
+ pkt.hdr.flags = VMBUS_CHANPKT_FLAG_RC;
+ pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.xactid = xactid;
+ pkt.rsvd = 0;
+ pkt.gpa_cnt = sglen;
+
+ iov[0].iov_base = &pkt;
+ iov[0].iov_len = sizeof(pkt);
+ iov[1].iov_base = sg;
+ iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
+ iov[2].iov_base = data;
+ iov[2].iov_len = dlen;
+ iov[3].iov_base = &pad;
+ iov[3].iov_len = pad_pktlen - pktlen;
+
+ error = vmbus_txbr_write(&chan->txbr, iov, 4, &send_evt);
+
+ /* if caller is batching, just propagate the status */
+ if (need_sig)
+ *need_sig |= send_evt;
+ else if (error == 0 && send_evt)
+ rte_vmbus_chan_signal_tx(chan);
+ return error;
+}
+
+bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel)
+{
+ const struct vmbus_br *br = &channel->rxbr;
+
+ return br->vbr->rindex == br->vbr->windex;
+}
+
+/* Signal host after reading N bytes */
+void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read)
+{
+ struct vmbus_br *rbr = &chan->rxbr;
+ uint32_t write_sz, pending_sz;
+
+ /* No need for signaling on older versions */
+ if (!rbr->vbr->feature_bits.feat_pending_send_sz)
+ return;
+
+ /* Make sure reading of pending happens after new read index */
+ rte_mb();
+
+ pending_sz = rbr->vbr->pending_send;
+ if (!pending_sz)
+ return;
+
+ rte_smp_rmb();
+ write_sz = vmbus_br_availwrite(rbr, rbr->vbr->windex);
+
+ /* If there was already space before, the host was not blocked */
+ if (write_sz - bytes_read > pending_sz)
+ return;
+
+ /* If pending write will not fit */
+ if (write_sz <= pending_sz)
+ return;
+
+ vmbus_set_event(chan->device, chan);
+}
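In other words, the host is signaled only when this read crosses the pending_send threshold. For a hypothetical pending_send of 4096 bytes: a read that grows the available write space from 3000 to 5000 bytes signals; one that only reaches 2000 bytes (still too small) does not; and one starting from 5000 bytes (host was never blocked) does not.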
+
+int rte_vmbus_chan_recv(struct vmbus_channel *chan, void *data, uint32_t *len,
+ uint64_t *request_id)
+{
+ struct vmbus_chanpkt_hdr pkt;
+ uint32_t dlen, hlen, bufferlen = *len;
+ int error;
+
+ *len = 0;
+
+ error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
+ if (error)
+ return error;
+
+ if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
+ VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
+ /* XXX this channel is actually dead. */
+ return -EIO;
+ }
+
+ if (unlikely(pkt.hlen > pkt.tlen)) {
+ VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u and tlen %u",
+ pkt.hlen, pkt.tlen);
+ return -EIO;
+ }
+
+ /* Lengths are in quadwords */
+ hlen = pkt.hlen << VMBUS_CHANPKT_SIZE_SHIFT;
+ dlen = (pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT) - hlen;
+ *len = dlen;
+
+ /* If caller buffer is not large enough */
+ if (unlikely(dlen > bufferlen))
+ return -ENOBUFS;
+
+ if (request_id)
+ *request_id = pkt.xactid;
+
+ /* Read data and skip packet header */
+ error = vmbus_rxbr_read(&chan->rxbr, data, dlen, hlen);
+ if (error)
+ return error;
+
+ rte_vmbus_chan_signal_read(chan, dlen + hlen + sizeof(uint64_t));
+ return 0;
+}
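A hedged sketch of a receive loop built on the call above; the buffer size and polling strategy are illustrative rather than prescribed by the API:

	char buf[2048];			/* illustrative buffer size */
	uint32_t len = sizeof(buf);
	uint64_t xactid;
	int err = rte_vmbus_chan_recv(chan, buf, &len, &xactid);

	if (err == 0) {
		/* len bytes of payload are in buf, request id in xactid */
	} else if (err == -EAGAIN) {
		/* ring is empty; poll again later */
	} else if (err == -ENOBUFS) {
		/* len now holds the payload size needed; retry with more */
	}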
+
+/* TODO: replace this with inplace ring buffer (no copy) */
+int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan,
+ void *data, uint32_t *len)
+{
+ struct vmbus_chanpkt_hdr pkt;
+ uint32_t dlen, bufferlen = *len;
+ int error;
+
+ error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
+ if (error)
+ return error;
+
+ if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
+ VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
+ /* XXX this channel is actually dead. */
+ return -EIO;
+ }
+
+ if (unlikely(pkt.hlen > pkt.tlen)) {
+ VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u and tlen %u",
+ pkt.hlen, pkt.tlen);
+ return -EIO;
+ }
+
+ /* Lengths are in quadwords */
+ dlen = pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT;
+ *len = dlen;
+
+ /* If caller buffer is not large enough */
+ if (unlikely(dlen > bufferlen))
+ return -ENOBUFS;
+
+ /* Read everything, including the channel packet header */
+ error = vmbus_rxbr_read(&chan->rxbr, data, dlen, 0);
+ if (error)
+ return error;
+
+ /* Return the number of bytes read */
+ return dlen + sizeof(uint64_t);
+}
+
+int vmbus_chan_create(const struct rte_vmbus_device *device,
+ uint16_t relid, uint16_t subid, uint8_t monitor_id,
+ struct vmbus_channel **new_chan)
+{
+ struct vmbus_channel *chan;
+ int err;
+
+ chan = rte_zmalloc_socket("VMBUS", sizeof(*chan), RTE_CACHE_LINE_SIZE,
+ device->device.numa_node);
+ if (!chan)
+ return -ENOMEM;
+
+ STAILQ_INIT(&chan->subchannel_list);
+ chan->device = device;
+ chan->subchannel_id = subid;
+ chan->relid = relid;
+ chan->monitor_id = monitor_id;
+ *new_chan = chan;
+
+ err = vmbus_uio_map_rings(chan);
+ if (err) {
+ rte_free(chan);
+ return err;
+ }
+
+ return 0;
+}
+
+/* Setup the primary channel */
+int rte_vmbus_chan_open(struct rte_vmbus_device *device,
+ struct vmbus_channel **new_chan)
+{
+ int err;
+
+ err = vmbus_chan_create(device, device->relid, 0,
+ device->monitor_id, new_chan);
+ if (!err)
+ device->primary = *new_chan;
+
+ return err;
+}
+
+int rte_vmbus_max_channels(const struct rte_vmbus_device *device)
+{
+ if (vmbus_uio_subchannels_supported(device, device->primary))
+ return VMBUS_MAX_CHANNELS;
+ else
+ return 1;
+}
+
+/* Setup secondary channel */
+int rte_vmbus_subchan_open(struct vmbus_channel *primary,
+ struct vmbus_channel **new_chan)
+{
+ struct vmbus_channel *chan;
+ int err;
+
+ err = vmbus_uio_get_subchan(primary, &chan);
+ if (err)
+ return err;
+
+ STAILQ_INSERT_TAIL(&primary->subchannel_list, chan, next);
+ *new_chan = chan;
+ return 0;
+}
+
+uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan)
+{
+ return chan->subchannel_id;
+}
+
+void rte_vmbus_chan_close(struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *device = chan->device;
+ struct vmbus_channel *primary = device->primary;
+
+ if (chan != primary)
+ STAILQ_REMOVE(&primary->subchannel_list, chan,
+ vmbus_channel, next);
+
+ rte_free(chan);
+}
+
+static void vmbus_dump_ring(FILE *f, const char *id, const struct vmbus_br *br)
+{
+ const struct vmbus_bufring *vbr = br->vbr;
+ struct vmbus_chanpkt_hdr pkt;
+
+ fprintf(f, "%s windex=%u rindex=%u mask=%u pending=%u feature=%#x\n",
+ id, vbr->windex, vbr->rindex, vbr->imask,
+ vbr->pending_send, vbr->feature_bits.value);
+ fprintf(f, " size=%u avail write=%u read=%u\n",
+ br->dsize, vmbus_br_availwrite(br, vbr->windex),
+ vmbus_br_availread(br));
+
+ if (vmbus_rxbr_peek(br, &pkt, sizeof(pkt)) == 0)
+ fprintf(f, " pkt type %#x len %u flags %#x xactid %#"PRIx64"\n",
+ pkt.type,
+ pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT,
+ pkt.flags, pkt.xactid);
+}
+
+void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan)
+{
+ fprintf(f, "channel[%u] relid=%u monitor=%u\n",
+ chan->subchannel_id, chan->relid, chan->monitor_id);
+ vmbus_dump_ring(f, "rxbr", &chan->rxbr);
+ vmbus_dump_ring(f, "txbr", &chan->txbr);
+}
diff --git a/drivers/bus/vmbus/vmbus_common.c b/drivers/bus/vmbus/vmbus_common.c
new file mode 100644
index 00000000..c7165ad5
--- /dev/null
+++ b/drivers/bus/vmbus/vmbus_common.c
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_devargs.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+int vmbus_logtype_bus;
+extern struct rte_vmbus_bus rte_vmbus_bus;
+
+/* map a particular resource from a file */
+void *
+vmbus_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
+ int flags)
+{
+ void *mapaddr;
+
+ /* Map the memory resource of device */
+ mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | flags, fd, offset);
+ if (mapaddr == MAP_FAILED) {
+ VMBUS_LOG(ERR,
+ "mmap(%d, %p, %zu, %ld) failed: %s",
+ fd, requested_addr, size, (long)offset,
+ strerror(errno));
+ }
+ return mapaddr;
+}
+
+/* unmap a particular resource */
+void
+vmbus_unmap_resource(void *requested_addr, size_t size)
+{
+ if (requested_addr == NULL)
+ return;
+
+ /* Unmap the VMBUS memory resource of device */
+ if (munmap(requested_addr, size)) {
+ VMBUS_LOG(ERR, "munmap(%p, 0x%lx) failed: %s",
+ requested_addr, (unsigned long)size,
+ strerror(errno));
+ } else
+ VMBUS_LOG(DEBUG, " VMBUS memory unmapped at %p",
+ requested_addr);
+}
+
+/**
+ * Match a VMBUS driver and device using the driver's UUID table
+ *
+ * @param dr
+ * VMBUS driver from which the ID table is taken
+ * @param dev
+ * VMBUS device to match against the driver
+ * @return
+ * true for a successful match
+ * false otherwise
+ */
+static bool
+vmbus_match(const struct rte_vmbus_driver *dr,
+ const struct rte_vmbus_device *dev)
+{
+ const rte_uuid_t *id_table;
+
+ for (id_table = dr->id_table; !rte_uuid_is_null(*id_table); ++id_table) {
+ if (rte_uuid_compare(*id_table, dev->class_id) == 0)
+ return true;
+ }
+
+ return false;
+}
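For context, a sketch of the driver side this matcher consumes. The GUID shown is the well-known Hyper-V network class ID, used purely for illustration; the driver and callback names are hypothetical:

	static const rte_uuid_t my_id_table[] = {
		{ 0xf8, 0x61, 0x51, 0x63, 0xdf, 0x3e, 0x46, 0xc5,
		  0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e },
		{ 0 }		/* null UUID terminates the table */
	};

	static int my_probe(struct rte_vmbus_driver *dr __rte_unused,
			    struct rte_vmbus_device *dev __rte_unused)
	{
		return 0;	/* accept the device */
	}

	static struct rte_vmbus_driver my_driver = {
		.id_table = my_id_table,
		.probe = my_probe,
	};

	RTE_INIT(my_driver_init)
	{
		my_driver.driver.name = "net_example";
		rte_vmbus_register(&my_driver);
	}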
+
+/*
+ * If the device ID matches, call the probe() function of the driver.
+ */
+static int
+vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
+ struct rte_vmbus_device *dev)
+{
+ char guid[RTE_UUID_STRLEN];
+ int ret;
+
+ if (!vmbus_match(dr, dev))
+ return 1; /* not supported */
+
+ rte_uuid_unparse(dev->device_id, guid, sizeof(guid));
+ VMBUS_LOG(INFO, "VMBUS device %s on NUMA socket %i",
+ guid, dev->device.numa_node);
+
+ /* TODO add blacklisted */
+
+ /* map resources for device */
+ ret = rte_vmbus_map_device(dev);
+ if (ret != 0)
+ return ret;
+
+ /* reference driver structure */
+ dev->driver = dr;
+ dev->device.driver = &dr->driver;
+
+ if (dev->device.numa_node < 0) {
+ VMBUS_LOG(WARNING, " Invalid NUMA socket, default to 0");
+ dev->device.numa_node = 0;
+ }
+
+ /* call the driver probe() function */
+ VMBUS_LOG(INFO, " probe driver: %s", dr->driver.name);
+ ret = dr->probe(dr, dev);
+ if (ret) {
+ dev->driver = NULL;
+ rte_vmbus_unmap_device(dev);
+ }
+
+ return ret;
+}
+
+/*
+ * If the device class GUID matches, call the probe function of
+ * registered drivers for the vmbus device.
+ * Return -1 if initialization failed,
+ * and 1 if no driver was found for this device.
+ */
+static int
+vmbus_probe_all_drivers(struct rte_vmbus_device *dev)
+{
+ struct rte_vmbus_driver *dr;
+ int rc;
+
+ /* Check if a driver is already loaded */
+ if (dev->driver != NULL) {
+ VMBUS_LOG(DEBUG, "VMBUS driver already loaded");
+ return 0;
+ }
+
+ FOREACH_DRIVER_ON_VMBUS(dr) {
+ rc = vmbus_probe_one_driver(dr, dev);
+ if (rc < 0) /* negative is an error */
+ return -1;
+
+ if (rc > 0) /* positive means the driver doesn't support it */
+ continue;
+
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Scan the vmbus and, for each discovered device, call the probe()
+ * function of every registered driver that has a matching entry
+ * in its id_table.
+ */
+int
+rte_vmbus_probe(void)
+{
+ struct rte_vmbus_device *dev;
+ size_t probed = 0, failed = 0;
+ char ubuf[RTE_UUID_STRLEN];
+
+ FOREACH_DEVICE_ON_VMBUS(dev) {
+ probed++;
+
+ rte_uuid_unparse(dev->device_id, ubuf, sizeof(ubuf));
+
+ /* TODO: add whitelist/blacklist */
+
+ if (vmbus_probe_all_drivers(dev) < 0) {
+ VMBUS_LOG(NOTICE,
+ "Requested device %s cannot be used", ubuf);
+ rte_errno = errno;
+ failed++;
+ }
+ }
+
+ return (probed && probed == failed) ? -1 : 0;
+}
+
+static int
+vmbus_parse(const char *name, void *addr)
+{
+ rte_uuid_t guid;
+ int ret;
+
+ ret = rte_uuid_parse(name, guid);
+ if (ret == 0 && addr)
+ memcpy(addr, &guid, sizeof(guid));
+
+ return ret;
+}
+
+/* register vmbus driver */
+void
+rte_vmbus_register(struct rte_vmbus_driver *driver)
+{
+ VMBUS_LOG(DEBUG,
+ "Registered driver %s", driver->driver.name);
+
+ TAILQ_INSERT_TAIL(&rte_vmbus_bus.driver_list, driver, next);
+ driver->bus = &rte_vmbus_bus;
+}
+
+/* unregister vmbus driver */
+void
+rte_vmbus_unregister(struct rte_vmbus_driver *driver)
+{
+ TAILQ_REMOVE(&rte_vmbus_bus.driver_list, driver, next);
+ driver->bus = NULL;
+}
+
+/* Add a device to VMBUS bus */
+void
+vmbus_add_device(struct rte_vmbus_device *vmbus_dev)
+{
+ TAILQ_INSERT_TAIL(&rte_vmbus_bus.device_list, vmbus_dev, next);
+}
+
+/* Insert a device into a predefined position in VMBUS bus */
+void
+vmbus_insert_device(struct rte_vmbus_device *exist_vmbus_dev,
+ struct rte_vmbus_device *new_vmbus_dev)
+{
+ TAILQ_INSERT_BEFORE(exist_vmbus_dev, new_vmbus_dev, next);
+}
+
+/* Remove a device from VMBUS bus */
+void
+vmbus_remove_device(struct rte_vmbus_device *vmbus_dev)
+{
+ TAILQ_REMOVE(&rte_vmbus_bus.device_list, vmbus_dev, next);
+}
+
+/* VMBUS doesn't support hotplug */
+static struct rte_device *
+vmbus_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_vmbus_device *dev;
+
+ FOREACH_DEVICE_ON_VMBUS(dev) {
+ if (start && &dev->device == start) {
+ start = NULL;
+ continue;
+ }
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ }
+
+ return NULL;
+}
+
+
+struct rte_vmbus_bus rte_vmbus_bus = {
+ .bus = {
+ .scan = rte_vmbus_scan,
+ .probe = rte_vmbus_probe,
+ .find_device = vmbus_find_device,
+ .parse = vmbus_parse,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_vmbus_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_vmbus_bus.driver_list),
+};
+
+RTE_REGISTER_BUS(vmbus, rte_vmbus_bus.bus);
+
+RTE_INIT(vmbus_init_log)
+{
+ vmbus_logtype_bus = rte_log_register("bus.vmbus");
+ if (vmbus_logtype_bus >= 0)
+ rte_log_set_level(vmbus_logtype_bus, RTE_LOG_NOTICE);
+}
diff --git a/drivers/bus/vmbus/vmbus_common_uio.c b/drivers/bus/vmbus/vmbus_common_uio.c
new file mode 100644
index 00000000..5ddd36ab
--- /dev/null
+++ b/drivers/bus/vmbus/vmbus_common_uio.c
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+static struct rte_tailq_elem vmbus_tailq = {
+ .name = "VMBUS_RESOURCE_LIST",
+};
+EAL_REGISTER_TAILQ(vmbus_tailq)
+
+static int
+vmbus_uio_map_secondary(struct rte_vmbus_device *dev)
+{
+ int fd, i;
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list
+ = RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+
+ /* skip this element if it doesn't match our UUID */
+ if (rte_uuid_compare(uio_res->id, dev->device_id) != 0)
+ continue;
+
+ /* open /dev/uioX */
+ fd = open(uio_res->path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ uio_res->path, strerror(errno));
+ return -1;
+ }
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ void *mapaddr;
+
+ mapaddr = vmbus_map_resource(uio_res->maps[i].addr,
+ fd, 0,
+ uio_res->maps[i].size, 0);
+
+ if (mapaddr == uio_res->maps[i].addr)
+ continue;
+
+ VMBUS_LOG(ERR,
+ "Cannot mmap device resource file %s to address: %p",
+ uio_res->path, uio_res->maps[i].addr);
+
+ if (mapaddr != MAP_FAILED)
+ /* unmap addr wrongly mapped */
+ vmbus_unmap_resource(mapaddr,
+ (size_t)uio_res->maps[i].size);
+
+ /* unmap addrs correctly mapped */
+ while (--i >= 0)
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+
+ close(fd);
+ return -1;
+ }
+
+ /* fd is not needed in the secondary process, close it */
+ close(fd);
+ return 0;
+ }
+
+ VMBUS_LOG(ERR, "Cannot find resource for device");
+ return 1;
+}
+
+static int
+vmbus_uio_map_primary(struct rte_vmbus_device *dev)
+{
+ int i, ret;
+ struct mapped_vmbus_resource *uio_res = NULL;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ /* allocate uio resource */
+ ret = vmbus_uio_alloc_resource(dev, &uio_res);
+ if (ret)
+ return ret;
+
+ /* Map the resources */
+ for (i = 0; i < VMBUS_MAX_RESOURCE; i++) {
+ /* skip empty BAR */
+ if (dev->resource[i].len == 0)
+ continue;
+
+ ret = vmbus_uio_map_resource_by_index(dev, i, uio_res, 0);
+ if (ret)
+ goto error;
+ }
+
+ uio_res->nb_maps = i;
+
+ TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
+
+ return 0;
+error:
+ while (--i >= 0) {
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ }
+ vmbus_uio_free_resource(dev, uio_res);
+ return -1;
+}
+
+
+struct mapped_vmbus_resource *
+vmbus_uio_find_resource(const struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ if (dev == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+ /* skip this element if it doesn't match our VMBUS address */
+ if (rte_uuid_compare(uio_res->id, dev->device_id) == 0)
+ return uio_res;
+ }
+ return NULL;
+}
+
+/* map the VMBUS resource of a VMBUS device in virtual memory */
+int
+vmbus_uio_map_resource(struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ int ret;
+
+ /* TODO: handle rescind */
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.uio_cfg_fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+
+ /* secondary processes - use already recorded details */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ ret = vmbus_uio_map_secondary(dev);
+ else
+ ret = vmbus_uio_map_primary(dev);
+
+ if (ret != 0)
+ return ret;
+
+ uio_res = vmbus_uio_find_resource(dev);
+ if (!uio_res) {
+ VMBUS_LOG(ERR, "cannot find resources!");
+ return -EIO;
+ }
+
+ if (uio_res->nb_maps <= HV_MON_PAGE_MAP) {
+ VMBUS_LOG(ERR, "VMBUS: only %u resources found!",
+ uio_res->nb_maps);
+ return -EINVAL;
+ }
+
+ dev->int_page = (uint32_t *)((char *)uio_res->maps[HV_INT_PAGE_MAP].addr
+ + (PAGE_SIZE >> 1));
+ dev->monitor_page = uio_res->maps[HV_MON_PAGE_MAP].addr;
+ return 0;
+}
+
+static void
+vmbus_uio_unmap(struct mapped_vmbus_resource *uio_res)
+{
+ int i;
+
+ if (uio_res == NULL)
+ return;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ }
+}
+
+/* unmap the VMBUS resource of a VMBUS device in virtual memory */
+void
+vmbus_uio_unmap_resource(struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ if (dev == NULL)
+ return;
+
+ /* find an entry for the device */
+ uio_res = vmbus_uio_find_resource(dev);
+ if (uio_res == NULL)
+ return;
+
+ /* secondary processes - just free maps */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return vmbus_uio_unmap(uio_res);
+
+ TAILQ_REMOVE(uio_res_list, uio_res, next);
+
+ /* unmap all resources */
+ vmbus_uio_unmap(uio_res);
+
+ /* free uio resource */
+ rte_free(uio_res);
+
+ /* close fd if in primary process */
+ close(dev->intr_handle.fd);
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+}
diff --git a/drivers/common/meson.build b/drivers/common/meson.build
index 5f6341b8..d7b7d8cf 100644
--- a/drivers/common/meson.build
+++ b/drivers/common/meson.build
@@ -2,6 +2,6 @@
# Copyright(c) 2018 Cavium, Inc
std_deps = ['eal']
-drivers = ['octeontx']
+drivers = ['octeontx', 'qat']
config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
driver_name_fmt = 'rte_common_@0@'
diff --git a/drivers/common/octeontx/octeontx_mbox.c b/drivers/common/octeontx/octeontx_mbox.c
index 93e6e857..880f8a40 100644
--- a/drivers/common/octeontx/octeontx_mbox.c
+++ b/drivers/common/octeontx/octeontx_mbox.c
@@ -61,9 +61,7 @@ struct mbox_ram_hdr {
int octeontx_logtype_mbox;
-RTE_INIT(otx_init_log);
-static void
-otx_init_log(void)
+RTE_INIT(otx_init_log)
{
octeontx_logtype_mbox = rte_log_register("pmd.octeontx.mbox");
if (octeontx_logtype_mbox >= 0)
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
new file mode 100644
index 00000000..c68a032a
--- /dev/null
+++ b/drivers/common/qat/Makefile
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# build directories
+QAT_CRYPTO_DIR := $(RTE_SDK)/drivers/crypto/qat
+QAT_COMPRESS_DIR := $(RTE_SDK)/drivers/compress/qat
+VPATH=$(QAT_CRYPTO_DIR):$(QAT_COMPRESS_DIR)
+
+# external library include paths
+CFLAGS += -I$(SRCDIR)/qat_adf
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -I$(QAT_CRYPTO_DIR)
+CFLAGS += -I$(QAT_COMPRESS_DIR)
+
+
+ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y)
+ CFLAGS += -DALLOW_EXPERIMENTAL_API
+ LDLIBS += -lrte_compressdev
+ SRCS-y += qat_comp.c
+ SRCS-y += qat_comp_pmd.c
+ build_qat = yes
+endif
+
+# library symmetric crypto source files
+ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
+ LDLIBS += -lrte_cryptodev
+ LDLIBS += -lcrypto
+ CFLAGS += -DBUILD_QAT_SYM
+ SRCS-y += qat_sym.c
+ SRCS-y += qat_sym_session.c
+ SRCS-y += qat_sym_pmd.c
+ build_qat = yes
+endif
+endif
+
+ifdef build_qat
+
+ # library name
+ LIB = librte_pmd_qat.a
+
+ # library version
+ LIBABIVER := 1
+ # build flags
+ CFLAGS += $(WERROR_FLAGS)
+ CFLAGS += -O3
+
+ # library common source files
+ SRCS-y += qat_device.c
+ SRCS-y += qat_common.c
+ SRCS-y += qat_logs.c
+ SRCS-y += qat_qp.c
+
+ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+ LDLIBS += -lrte_pci -lrte_bus_pci
+
+ # export include files
+ SYMLINK-y-include +=
+
+ # versioning export map
+ EXPORT_MAP := ../../compress/qat/rte_pmd_qat_version.map
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
new file mode 100644
index 00000000..80b6b25a
--- /dev/null
+++ b/drivers/common/qat/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+# This does not build a driver, but instead holds common files for
+# the crypto and compression drivers.
+build = false
+qat_deps = ['bus_pci']
+qat_sources = files('qat_common.c',
+ 'qat_qp.c',
+ 'qat_device.c',
+ 'qat_logs.c')
+qat_includes = [include_directories('.', 'qat_adf')]
+qat_ext_deps = []
+qat_cflags = []
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index bfdbc979..1eef5513 100644
--- a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -50,8 +50,10 @@
#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
-#define ADF_NUM_BUNDLES_PER_DEV 1
-#define ADF_NUM_SYM_QPS_PER_BUNDLE 2
+/* Maximum number of qps on a device for any service type */
+#define ADF_MAX_QPS_ON_ANY_SERVICE 2
+#define ADF_RING_DIR_TX 0
+#define ADF_RING_DIR_RX 1
/* Valid internal msg size values */
#define ADF_MSG_SIZE_32 0x01
@@ -130,4 +132,5 @@ do { \
#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
ADF_RING_CSR_INT_FLAG_AND_COL, value)
-#endif
+
+#endif /* ADF_TRANSPORT_ACCESS_MACROS_H */
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index ae39b7f1..8f7cb37b 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -117,6 +117,10 @@ struct icp_qat_fw_comn_resp {
#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6
+#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
icp_qat_fw_comn_req_hdr_t.service_type
@@ -133,6 +137,16 @@ struct icp_qat_fw_comn_resp {
#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
@@ -204,29 +218,44 @@ struct icp_qat_fw_comn_resp {
& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+#define ICP_QAT_FW_COMN_NEXT_ID_SET_2(next_curr_id, val) \
+ do { \
+ (next_curr_id) = \
+ (((next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ (((val) << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \
+ ICP_QAT_FW_COMN_NEXT_ID_MASK)) \
+ } while (0)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET_2(next_curr_id, val) \
+ do { \
+ (next_curr_id) = \
+ (((next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) \
+ } while (0)
+
#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
-
-#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
- ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
- QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
- (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
- QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
- (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
- QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
- (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
- QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
+#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
+#define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+#define ICP_QAT_FW_COMN_RESP_PKE_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+ QAT_COMN_RESP_PKE_STATUS_MASK)
+
#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
QAT_COMN_RESP_CMP_STATUS_MASK)
@@ -235,10 +264,18 @@ struct icp_qat_fw_comn_resp {
QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
QAT_COMN_RESP_XLAT_STATUS_MASK)
+#define ICP_QAT_FW_COMN_RESP_XLT_WA_APPLIED_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS, \
+ QAT_COMN_RESP_XLT_WA_APPLIED_MASK)
+
#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+#define ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
+ QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
+
#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
@@ -257,8 +294,16 @@ struct icp_qat_fw_comn_resp {
#define ERR_CODE_OVERFLOW_ERROR -11
#define ERR_CODE_SOFT_ERROR -12
#define ERR_CODE_FATAL_ERROR -13
-#define ERR_CODE_SSM_ERROR -14
-#define ERR_CODE_ENDPOINT_ERROR -15
+#define ERR_CODE_COMP_OUTPUT_CORRUPTION -14
+#define ERR_CODE_HW_INCOMPLETE_FILE -15
+#define ERR_CODE_SSM_ERROR -16
+#define ERR_CODE_ENDPOINT_ERROR -17
+#define ERR_CODE_CNV_ERROR -18
+#define ERR_CODE_EMPTY_DYM_BLOCK -19
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22
+#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23
enum icp_qat_fw_slice {
ICP_QAT_FW_SLICE_NULL = 0,
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_comp.h b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
new file mode 100644
index 00000000..81381772
--- /dev/null
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_COMP_H_
+#define _ICP_QAT_FW_COMP_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_comp_cmd_id {
+ ICP_QAT_FW_COMP_CMD_STATIC = 0,
+ /*!< Static Compress Request */
+
+ ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
+ /*!< Dynamic Compress Request */
+
+ ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+ /*!< Decompress Request */
+
+ ICP_QAT_FW_COMP_CMD_DELIMITER
+ /**< Delimiter type */
+};
+
+/**< Flag usage */
+
+#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that session is stateless
+ */
+
+#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that session is stateful
+ */
+
+#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that type0 header write-back is NOT disabled
+ * when enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that type0 header write-back is disabled
+ * when enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that use of secure RAM as an
+ * intermediate buffer is DISABLED.
+ */
+
+#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that use of secure RAM as an
+ * intermediate buffer is ENABLED.
+ */
+
+/**< Flag mask & bit position */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the session type
+ */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine the session type
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for disabling type zero header write-back
+ * when enhanced autoselect best is enabled. If set, firmware does
+ * not return the type0 stored-block header and only copies src to
+ * dest (if the best output is type0).
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for disabling type zero header write-back
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for flag used to disable secure ram from
+ * being used as an intermediate buffer.
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for disable secure ram for use as an intermediate
+ * buffer.
+ */
+
+#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
+ ret_uncomp, secure_ram) \
+ ((((sesstype)&ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \
+ << ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
+ (((autoselect)&ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \
+ << ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
+ (((enhanced_asb)&ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) \
+ << ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
+ (((ret_uncomp)&ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) \
+ << ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
+ (((secure_ram)&ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) \
+ << ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
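As a usage illustration, one plausible invocation of the macro above for a stateless session with all auto-select-best options off and secure RAM left enabled (a sketch; the real callers live in the compression PMD):

	uint32_t comp_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
			ICP_QAT_FW_COMP_STATELESS_SESSION,
			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);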
+
+union icp_qat_fw_comp_req_hdr_cd_pars {
+ /**< LWs 2-5 */
+ struct {
+ uint64_t content_desc_addr;
+ /**< Address of the content descriptor */
+
+ uint16_t content_desc_resrvd1;
+ /**< Content descriptor reserved field */
+
+ uint8_t content_desc_params_sz;
+ /**< Size of the content descriptor parameters in quad words.
+ * These parameters describe the session setup configuration
+ * info for the slices that this request relies upon i.e.
+ * the configuration word and cipher key needed by the cipher
+ * slice if there is a request for cipher processing.
+ */
+
+ uint8_t content_desc_hdr_resrvd2;
+ /**< Content descriptor reserved field */
+
+ uint32_t content_desc_resrvd3;
+ /**< Content descriptor reserved field */
+ } s;
+
+ struct {
+ uint32_t comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /* Compression Slice Config Word */
+
+ uint32_t content_desc_resrvd4;
+ /**< Content descriptor reserved field */
+
+ } sl;
+
+};
+
+struct icp_qat_fw_comp_req_params {
+ /**< LW 14 */
+ uint32_t comp_len;
+ /**< Size of input to process in bytes. Note: only EOP requests
+ * can be odd-sized for decompression. IA must set the LSB to zero
+ * for odd-sized intermediate inputs
+ */
+
+ /**< LW 15 */
+ uint32_t out_buffer_sz;
+ /**< Size of output buffer in bytes */
+
+ /**< LW 16 */
+ uint32_t initial_crc32;
+ /**< CRC of previously processed bytes */
+
+ /**< LW 17 */
+ uint32_t initial_adler;
+ /**< Adler of previously processed bytes */
+
+ /**< LW 18 */
+ uint32_t req_par_flags;
+
+ /**< LW 19 */
+ uint32_t rsrvd;
+};
+
+#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr) \
+ ((((sop)&ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) | \
+ (((eop)&ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) | \
+ (((bfinal)&ICP_QAT_FW_COMP_BFINAL_MASK) \
+ << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
+ ((cnv & ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) | \
+ ((cnvnr & ICP_QAT_FW_COMP_CNV_RECOVERY_MASK) \
+ << ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS))
+
+#define ICP_QAT_FW_COMP_NOT_SOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_SOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_EOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT End of Packet
+ */
+
+#define ICP_QAT_FW_COMP_EOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS End of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_BFINAL 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing to indicate firmware this is not the last block
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing to indicate firmware this is the last block
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO cnv check is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that a cnv check IS to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO cnv recovery is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that a cnv recovery is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_SOP_BITPOS 0
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for SOP
+ */
+
+#define ICP_QAT_FW_COMP_SOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine SOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_BITPOS 1
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for EOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine EOP
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the bfinal bit
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the bfinal bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the CNV bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_BITPOS 16
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the CNV bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the CNV Recovery bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS 17
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the CNV Recovery bit
+ */
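With all of the per-request flags now defined, a sketch of building req_par_flags for a single, complete stateless request with compress-and-verify plus recovery enabled:

	uint32_t req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
			ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
			ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
			ICP_QAT_FW_COMP_CNV_RECOVERY);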
+
+struct icp_qat_fw_xlt_req_params {
+ /**< LWs 20-21 */
+ uint64_t inter_buff_ptr;
+ /**< This field specifies the physical address of an intermediate
+ * buffer SGL array. The array contains a pair of 64-bit
+ * intermediate buffer pointers to SGL buffer descriptors, one pair
+ * per CPM. Please refer to the CPM1.6 Firmware Interface HLD
+ * specification for more details.
+ */
+};
+
+
+struct icp_qat_fw_comp_cd_hdr {
+ /**< LW 24 */
+ uint16_t ram_bank_flags;
+ /**< Flags to show which ram banks to access */
+
+ uint8_t comp_cfg_offset;
+ /**< Quad word offset from the content descriptor parameters address
+ * to the parameters for the compression processing
+ */
+
+ uint8_t next_curr_id;
+ /**< This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the compressed data through.
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
+ * anymore slices after compression
+ * Current Id: Initialised with the compression slice type
+ */
+
+ /**< LW 25 */
+ uint32_t resrvd;
+ /**< LWs 26-27 */
+
+ uint64_t comp_state_addr;
+ /**< Pointer to compression state */
+
+ /**< LWs 28-29 */
+ uint64_t ram_banks_addr;
+ /**< Pointer to banks */
+
+};
+
+
+struct icp_qat_fw_xlt_cd_hdr {
+ /**< LW 30 */
+ uint16_t resrvd1;
+ /**< Reserved field and assumed set to 0 */
+
+ uint8_t resrvd2;
+ /**< Reserved field and assumed set to 0 */
+
+ uint8_t next_curr_id;
+ /**< This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the translated data through.
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after compression
+ * Current Id: Initialised with the translation slice type
+ */
+
+ /**< LW 31 */
+ uint32_t resrvd3;
+ /**< Reserved and should be set to zero, needed for quadword
+ * alignment
+ */
+};
+
+struct icp_qat_fw_comp_req {
+ /**< LWs 0-1 */
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ /**< Common request header - for Service Command Id,
+ * use service-specific Compression Command Id.
+ * Service Specific Flags - use Compression Command Flags
+ */
+
+ /**< LWs 2-5 */
+ union icp_qat_fw_comp_req_hdr_cd_pars cd_pars;
+ /**< Compression service-specific content descriptor field which points
+ * either to a content descriptor parameter block or contains the
+ * compression slice config word.
+ */
+
+ /**< LWs 6-13 */
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ /**< Common request middle section */
+
+ /**< LWs 14-19 */
+ struct icp_qat_fw_comp_req_params comp_pars;
+ /**< Compression request Parameters block */
+
+ /**< LWs 20-21 */
+ union {
+ struct icp_qat_fw_xlt_req_params xlt_pars;
+ /**< Translation request Parameters block */
+ uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved if not used for translation */
+
+ } u1;
+
+ /**< LWs 22-23 */
+ union {
+ uint32_t resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved - not used if Batch and Pack is disabled.*/
+
+ uint64_t bnp_res_table_addr;
+ /**< A generic pointer to the unbounded list of
+ * icp_qat_fw_resp_comp_pars members. This pointer is only
+ * used when the Batch and Pack is enabled.
+ */
+ } u3;
+
+ /**< LWs 24-29 */
+ struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
+ /**< Compression request content descriptor control block header */
+
+ /**< LWs 30-31 */
+ union {
+ struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
+ /**< Translation request content descriptor
+ * control block header
+ */
+
+ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved if not used for translation */
+ } u2;
+};
+
+struct icp_qat_fw_resp_comp_pars {
+ /**< LW 4 */
+ uint32_t input_byte_counter;
+ /**< Input byte counter */
+
+ /**< LW 5 */
+ uint32_t output_byte_counter;
+ /**< Output byte counter */
+
+ /**< LW 6 & 7*/
+ union {
+ uint64_t curr_chksum;
+ struct {
+ /**< LW 6 */
+ uint32_t curr_crc32;
+ /**< LW 7 */
+ uint32_t curr_adler_32;
+ };
+ };
+};
+
+struct icp_qat_fw_comp_resp {
+ /**< LWs 0-1 */
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ /**< Common interface response format see icp_qat_fw.h */
+
+ /**< LWs 2-3 */
+ uint64_t opaque_data;
+ /**< Opaque data passed from the request to the response message */
+
+ /**< LWs 4-7 */
+ struct icp_qat_fw_resp_comp_pars comp_resp_pars;
+ /**< Common response params (checksums and byte counts) */
+};
+
+#endif
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index c33bc3fe..c33bc3fe 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/common/qat/qat_adf/icp_qat_hw.h
index 56e3cf79..e7961dba 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_hw.h
@@ -72,19 +72,44 @@ struct icp_qat_hw_auth_config {
#define QAT_AUTH_ALGO_MASK 0xF
#define QAT_AUTH_CMP_BITPOS 8
#define QAT_AUTH_CMP_MASK 0x7F
-#define QAT_AUTH_SHA3_PADDING_BITPOS 16
-#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_DISABLE_MASK 0x1
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS 17
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK 0x1
#define QAT_AUTH_ALGO_SHA3_BITPOS 22
#define QAT_AUTH_ALGO_SHA3_MASK 0x3
-#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
- (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
- ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
- (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
- QAT_AUTH_ALGO_SHA3_BITPOS) | \
- (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
- (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
- & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
- ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS 16
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK 0xF
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS 24
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK 0xFF
+#define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0
+#define QAT_AUTH_SHA3_HW_PADDING_DISABLE 1
+#define QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT 0
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT 0
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_PROGRAMMABLE 1
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED 0
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED 0
+
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+ ((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+ (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+ (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) \
+ << QAT_AUTH_ALGO_SHA3_BITPOS) | \
+ (((QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT) & \
+ QAT_AUTH_SHA3_PADDING_DISABLE_MASK) \
+ << QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS) | \
+ (((QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT) & \
+ QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK) \
+ << QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS) | \
+ (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER \
+ ((((QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED) & \
+ QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK) \
+ << QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS) | \
+ (((QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED) & \
+ QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK) \
+ << QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS))
struct icp_qat_hw_auth_counter {
uint32_t counter;
@@ -107,13 +132,13 @@ struct icp_qat_hw_auth_setup {
#define ICP_QAT_HW_MD5_STATE1_SZ 16
#define ICP_QAT_HW_SHA1_STATE1_SZ 20
#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
#define ICP_QAT_HW_SHA256_STATE1_SZ 32
#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
#define ICP_QAT_HW_SHA512_STATE1_SZ 64
#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
-#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
@@ -121,17 +146,18 @@ struct icp_qat_hw_auth_setup {
#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+
#define ICP_QAT_HW_NULL_STATE2_SZ 32
#define ICP_QAT_HW_MD5_STATE2_SZ 16
#define ICP_QAT_HW_SHA1_STATE2_SZ 20
#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
#define ICP_QAT_HW_SHA256_STATE2_SZ 32
#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
#define ICP_QAT_HW_SHA512_STATE2_SZ 64
#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
@@ -154,6 +180,12 @@ struct icp_qat_hw_auth_sha512 {
uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
};
+struct icp_qat_hw_auth_sha3_512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ uint8_t state1[ICP_QAT_HW_SHA3_512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+};
+
struct icp_qat_hw_auth_algo_blk {
struct icp_qat_hw_auth_sha512 sha;
};
@@ -283,4 +315,72 @@ struct icp_qat_hw_cipher_algo_blk {
uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
} __rte_cache_aligned;
+/* ========================================================================= */
+/* COMPRESSION SLICE */
+/* ========================================================================= */
+
+enum icp_qat_hw_compression_direction {
+ ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
+ ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
+ ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_delayed_match {
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_algo {
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
+ ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
+ ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
+};
+
+
+enum icp_qat_hw_compression_depth {
+ ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
+ ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
+ ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
+ ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
+ ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 4
+};
+
+enum icp_qat_hw_compression_file_type {
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
+};
+
+struct icp_qat_hw_compression_config {
+ uint32_t val;
+ uint32_t reserved;
+};
+
+#define QAT_COMPRESSION_DIR_BITPOS 4
+#define QAT_COMPRESSION_DIR_MASK 0x7
+#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
+#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
+#define QAT_COMPRESSION_ALGO_BITPOS 31
+#define QAT_COMPRESSION_ALGO_MASK 0x1
+#define QAT_COMPRESSION_DEPTH_BITPOS 28
+#define QAT_COMPRESSION_DEPTH_MASK 0x7
+#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
+#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF
+
+#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( \
+ dir, delayed, algo, depth, filetype) \
+ ((((dir) & QAT_COMPRESSION_DIR_MASK) << QAT_COMPRESSION_DIR_BITPOS) | \
+ (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) \
+ << QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
+ (((algo) & QAT_COMPRESSION_ALGO_MASK) \
+ << QAT_COMPRESSION_ALGO_BITPOS) | \
+ (((depth) & QAT_COMPRESSION_DEPTH_MASK) \
+ << QAT_COMPRESSION_DEPTH_BITPOS) | \
+ (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) \
+ << QAT_COMPRESSION_FILE_TYPE_BITPOS))
+
#endif
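A hedged example of filling the config struct above for the deflate compress direction with search depth 8 and delayed match disabled (values are illustrative, not a recommendation):

	struct icp_qat_hw_compression_config comp_cfg = {
		.val = ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
				ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
				ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
				ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
				ICP_QAT_HW_COMPRESSION_DEPTH_8,
				ICP_QAT_HW_COMPRESSION_FILE_TYPE_0),
		.reserved = 0,
	};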
diff --git a/drivers/common/qat/qat_common.c b/drivers/common/qat/qat_common.c
new file mode 100644
index 00000000..47538669
--- /dev/null
+++ b/drivers/common/qat/qat_common.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "qat_common.h"
+#include "qat_device.h"
+#include "qat_logs.h"
+
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset,
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs)
+{
+ int res = -EINVAL;
+ uint32_t buf_len, nr;
+ struct qat_sgl *list = (struct qat_sgl *)list_in;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ uint8_t *virt_addr[max_segs];
+#endif
+
+ for (nr = buf_len = 0; buf && nr < max_segs; buf = buf->next) {
+ if (offset >= rte_pktmbuf_data_len(buf)) {
+ offset -= rte_pktmbuf_data_len(buf);
+ continue;
+ }
+
+ list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+ list->buffers[nr].resrvd = 0;
+ list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ virt_addr[nr] = rte_pktmbuf_mtod_offset(buf, uint8_t*, offset);
+#endif
+ offset = 0;
+ buf_len += list->buffers[nr].len;
+
+ if (buf_len >= data_len) {
+ list->buffers[nr].len -= buf_len - data_len;
+ res = 0;
+ break;
+ }
+ ++nr;
+ }
+
+ if (unlikely(res != 0)) {
+ if (nr == max_segs) {
+ QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+ max_segs);
+ } else {
+ QAT_DP_LOG(ERR, "Mbuf chain is too short");
+ }
+ } else {
+
+ list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+ for (nr = 0; nr < list->num_bufs; nr++) {
+ QAT_DP_LOG(INFO,
+ "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+ nr, list->buffers[nr].len,
+ list->buffers[nr].addr);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL",
+ virt_addr[nr],
+ list->buffers[nr].len);
+ }
+#endif
+ }
+
+ return res;
+}
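A hedged caller-side sketch: declare an SGL with a fixed segment budget using the qat_sgl_hdr pattern from qat_common.h, then fill it from a chained mbuf. The mbuf m, offset and data_len are assumed caller state, and MY_MAX_SEGS is a hypothetical bound:

	#define MY_MAX_SEGS 16

	struct {
		qat_sgl_hdr;
		struct qat_flat_buf buffers[MY_MAX_SEGS];
	} __rte_packed my_sgl;

	int ret = qat_sgl_fill_array(m, offset, &my_sgl, data_len,
				     MY_MAX_SEGS);
	/* On success, my_sgl.num_bufs segments cover exactly data_len bytes. */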
+
+void qat_stats_get(struct qat_pci_device *dev,
+ struct qat_common_stats *stats,
+ enum qat_service_type service)
+{
+ int i;
+ struct qat_qp **qp;
+
+ if (stats == NULL || dev == NULL || service >= QAT_SERVICE_INVALID) {
+ QAT_LOG(ERR, "invalid param: stats %p, dev %p, service %d",
+ stats, dev, service);
+ return;
+ }
+
+ qp = dev->qps_in_use[service];
+ for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
+ if (qp[i] == NULL) {
+ QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
+ service, i);
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->stats.enqueued_count;
+ stats->dequeued_count += qp[i]->stats.dequeued_count;
+ stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
+ }
+}
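A short usage sketch: the stats struct must be zeroed by the caller, since this function accumulates with +=. The qat_dev pointer is assumed caller state, and PRIu64 needs <inttypes.h>:

	struct qat_common_stats stats = { 0 };

	qat_stats_get(qat_dev, &stats, QAT_SERVICE_SYMMETRIC);
	QAT_LOG(INFO, "enqueued %" PRIu64 ", dequeued %" PRIu64,
		stats.enqueued_count, stats.dequeued_count);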
+
+void qat_stats_reset(struct qat_pci_device *dev,
+ enum qat_service_type service)
+{
+ int i;
+ struct qat_qp **qp;
+
+ if (dev == NULL || service >= QAT_SERVICE_INVALID) {
+ QAT_LOG(ERR, "invalid param: dev %p, service %d",
+ dev, service);
+ return;
+ }
+
+ qp = dev->qps_in_use[service];
+ for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
+ if (qp[i] == NULL) {
+ QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
+ service, i);
+ continue;
+ }
+ memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
+ }
+
+ QAT_LOG(DEBUG, "QAT: %d stats cleared", service);
+}
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
new file mode 100644
index 00000000..d4bef539
--- /dev/null
+++ b/drivers/common/qat/qat_common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_COMMON_H_
+#define _QAT_COMMON_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+/**< Intel(R) QAT device name for PCI registration */
+#define QAT_PCI_NAME qat
+#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+
+/* Intel(R) QuickAssist Technology device generations, enumerated
+ * from 1 in order of hardware generation
+ */
+enum qat_device_gen {
+ QAT_GEN1 = 1,
+ QAT_GEN2
+};
+
+enum qat_service_type {
+ QAT_SERVICE_ASYMMETRIC = 0,
+ QAT_SERVICE_SYMMETRIC,
+ QAT_SERVICE_COMPRESSION,
+ QAT_SERVICE_INVALID
+};
+
+#define QAT_MAX_SERVICES (QAT_SERVICE_INVALID)
+
+/**< Common struct for scatter-gather list operations */
+struct qat_flat_buf {
+ uint32_t len;
+ uint32_t resrvd;
+ uint64_t addr;
+} __rte_packed;
+
+#define qat_sgl_hdr struct { \
+ uint64_t resrvd; \
+ uint32_t num_bufs; \
+ uint32_t num_mapped_bufs; \
+}
+
+__extension__
+struct qat_sgl {
+ qat_sgl_hdr;
+ /* flexible array of flat buffers*/
+ struct qat_flat_buf buffers[0];
+} __rte_packed __rte_cache_aligned;
+
+/** Common, i.e. not service-specific, statistics */
+struct qat_common_stats {
+ uint64_t enqueued_count;
+ /**< Count of all operations enqueued */
+ uint64_t dequeued_count;
+ /**< Count of all operations dequeued */
+
+ uint64_t enqueue_err_count;
+ /**< Total error count on operations enqueued */
+ uint64_t dequeue_err_count;
+ /**< Total error count on operations dequeued */
+};
+
+struct qat_pci_device;
+
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset,
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs);
+void
+qat_stats_get(struct qat_pci_device *dev,
+ struct qat_common_stats *stats,
+ enum qat_service_type service);
+void
+qat_stats_reset(struct qat_pci_device *dev,
+ enum qat_service_type service);
+
+#endif /* _QAT_COMMON_H_ */
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
new file mode 100644
index 00000000..f32d7235
--- /dev/null
+++ b/drivers/common/qat/qat_device.c
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_string_fns.h>
+
+#include "qat_device.h"
+#include "adf_transport_access_macros.h"
+#include "qat_sym_pmd.h"
+
+/* Hardware device information per generation */
+__extension__
+struct qat_gen_hw_data qat_gen_config[] = {
+ [QAT_GEN1] = {
+ .dev_gen = QAT_GEN1,
+ .qp_hw_data = qat_gen1_qps,
+ },
+ [QAT_GEN2] = {
+ .dev_gen = QAT_GEN2,
+ .qp_hw_data = qat_gen1_qps,
+ /* gen2 has same ring layout as gen1 */
+ },
+};
+
+
+static struct qat_pci_device qat_pci_devices[RTE_PMD_QAT_MAX_PCI_DEVICES];
+static int qat_nb_pci_devices;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+
+static const struct rte_pci_id pci_id_qat_map[] = {
+ {
+ RTE_PCI_DEVICE(0x8086, 0x0443),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x37c9),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x19e3),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x6f55),
+ },
+ {.device_id = 0},
+};
+
+
+static struct qat_pci_device *
+qat_pci_get_dev(uint8_t dev_id)
+{
+ return &qat_pci_devices[dev_id];
+}
+
+static struct qat_pci_device *
+qat_pci_get_named_dev(const char *name)
+{
+ struct qat_pci_device *dev;
+ unsigned int i;
+
+ if (name == NULL)
+ return NULL;
+
+ for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) {
+ dev = &qat_pci_devices[i];
+
+ if ((dev->attached == QAT_ATTACHED) &&
+ (strcmp(dev->name, name) == 0))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static uint8_t
+qat_pci_find_free_device_index(void)
+{
+ uint8_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES; dev_id++) {
+ if (qat_pci_devices[dev_id].attached == QAT_DETACHED)
+ break;
+ }
+ return dev_id;
+}
+
+struct qat_pci_device *
+qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev)
+{
+ char name[QAT_DEV_NAME_MAX_LEN];
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return qat_pci_get_named_dev(name);
+}
+
+struct qat_pci_device *
+qat_pci_device_allocate(struct rte_pci_device *pci_dev)
+{
+ struct qat_pci_device *qat_dev;
+ uint8_t qat_dev_id;
+ char name[QAT_DEV_NAME_MAX_LEN];
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
+ if (qat_pci_get_named_dev(name) != NULL) {
+ QAT_LOG(ERR, "QAT device with name %s already allocated!",
+ name);
+ return NULL;
+ }
+
+ qat_dev_id = qat_pci_find_free_device_index();
+ if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) {
+ QAT_LOG(ERR, "Reached maximum number of QAT devices");
+ return NULL;
+ }
+
+ qat_dev = qat_pci_get_dev(qat_dev_id);
+ memset(qat_dev, 0, sizeof(*qat_dev));
+ strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN);
+ qat_dev->qat_dev_id = qat_dev_id;
+ qat_dev->pci_dev = pci_dev;
+ switch (qat_dev->pci_dev->id.device_id) {
+ case 0x0443:
+ qat_dev->qat_dev_gen = QAT_GEN1;
+ break;
+ case 0x37c9:
+ case 0x19e3:
+ case 0x6f55:
+ qat_dev->qat_dev_gen = QAT_GEN2;
+ break;
+ default:
+ QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
+ return NULL;
+ }
+
+ rte_spinlock_init(&qat_dev->arb_csr_lock);
+
+ qat_dev->attached = QAT_ATTACHED;
+
+ qat_nb_pci_devices++;
+
+ QAT_LOG(DEBUG, "QAT device %d allocated, name %s, total QATs %d",
+ qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);
+
+ return qat_dev;
+}
+
+int
+qat_pci_device_release(struct rte_pci_device *pci_dev)
+{
+ struct qat_pci_device *qat_dev;
+ char name[QAT_DEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
+ qat_dev = qat_pci_get_named_dev(name);
+ if (qat_dev != NULL) {
+
+ /* Check that there are no service devs still on pci device */
+ if (qat_dev->sym_dev != NULL)
+ return -EBUSY;
+
+ qat_dev->attached = QAT_DETACHED;
+ qat_nb_pci_devices--;
+ }
+ QAT_LOG(DEBUG, "QAT device %s released, total QATs %d",
+ name, qat_nb_pci_devices);
+ return 0;
+}
+
+static int
+qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev,
+ struct rte_pci_device *pci_dev)
+{
+ qat_sym_dev_destroy(qat_pci_dev);
+ qat_comp_dev_destroy(qat_pci_dev);
+ qat_asym_dev_destroy(qat_pci_dev);
+ return qat_pci_device_release(pci_dev);
+}
+
+static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ int ret = 0;
+ struct qat_pci_device *qat_pci_dev;
+
+ QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ qat_pci_dev = qat_pci_device_allocate(pci_dev);
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+
+ ret = qat_sym_dev_create(qat_pci_dev);
+ if (ret != 0)
+ goto error_out;
+
+ ret = qat_comp_dev_create(qat_pci_dev);
+ if (ret != 0)
+ goto error_out;
+
+ ret = qat_asym_dev_create(qat_pci_dev);
+ if (ret != 0)
+ goto error_out;
+
+ return 0;
+
+error_out:
+ qat_pci_dev_destroy(qat_pci_dev, pci_dev);
+ return ret;
+
+}
+
+static int qat_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct qat_pci_device *qat_pci_dev;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev);
+ if (qat_pci_dev == NULL)
+ return 0;
+
+ return qat_pci_dev_destroy(qat_pci_dev, pci_dev);
+}
+
+static struct rte_pci_driver rte_qat_pmd = {
+ .id_table = pci_id_qat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = qat_pci_probe,
+ .remove = qat_pci_remove
+};
+
+__attribute__((weak)) int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map);
diff --git a/drivers/common/qat/qat_device.h b/drivers/common/qat/qat_device.h
new file mode 100644
index 00000000..9599fc59
--- /dev/null
+++ b/drivers/common/qat/qat_device.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_DEVICE_H_
+#define _QAT_DEVICE_H_
+
+#include <rte_bus_pci.h>
+
+#include "qat_common.h"
+#include "qat_logs.h"
+#include "adf_transport_access_macros.h"
+#include "qat_qp.h"
+
+#define QAT_DETACHED (0)
+#define QAT_ATTACHED (1)
+
+#define QAT_DEV_NAME_MAX_LEN 64
+
+/*
+ * This struct holds all the data about a QAT pci device
+ * including data about all services it supports.
+ * It contains
+ * - hw_data
+ * - config data
+ * - runtime data
+ */
+struct qat_sym_dev_private;
+struct qat_comp_dev_private;
+
+struct qat_pci_device {
+
+ /* Data used by all services */
+ char name[QAT_DEV_NAME_MAX_LEN];
+ /**< Name of qat pci device */
+ uint8_t qat_dev_id;
+ /**< Device instance for this qat pci device */
+ struct rte_pci_device *pci_dev;
+ /**< PCI information. */
+ enum qat_device_gen qat_dev_gen;
+ /**< QAT device generation */
+ rte_spinlock_t arb_csr_lock;
+ /**< lock to protect accesses to the arbiter CSR */
+ __extension__
+ uint8_t attached : 1;
+ /**< Flag indicating the device is attached */
+
+ struct qat_qp *qps_in_use[QAT_MAX_SERVICES][ADF_MAX_QPS_ON_ANY_SERVICE];
+ /**< links to qps set up for each service, index same as on API */
+
+ /* Data relating to symmetric crypto service */
+ struct qat_sym_dev_private *sym_dev;
+ /**< link back to cryptodev private data */
+ struct rte_device sym_rte_dev;
+ /**< This represents the crypto subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a crypto-specific name
+ */
+
+ /* Data relating to compression service */
+ struct qat_comp_dev_private *comp_dev;
+ /**< link back to compressdev private data */
+
+ /* Data relating to asymmetric crypto service */
+
+};
+
+struct qat_gen_hw_data {
+ enum qat_device_gen dev_gen;
+ const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_ON_ANY_SERVICE];
+};
+
+extern struct qat_gen_hw_data qat_gen_config[];
+
+struct qat_pci_device *
+qat_pci_device_allocate(struct rte_pci_device *pci_dev);
+
+int
+qat_pci_device_release(struct rte_pci_device *pci_dev);
+
+struct qat_pci_device *
+qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev);
+
+/* Declarations needed for the weak functions in qat_device.c */
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+#endif /* _QAT_DEVICE_H_ */
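
Because qat_gen_config[] is indexed directly by enum qat_device_gen, a service PMD can fetch its per-generation ring table in one step. A short sketch (assumes qat_dev is a valid struct qat_pci_device *):

    const struct qat_qp_hw_data *sym_hw_qps =
            qat_gen_config[qat_dev->qat_dev_gen]
                    .qp_hw_data[QAT_SERVICE_SYMMETRIC];
    int nb_qps = qat_qps_per_service(sym_hw_qps,
                    QAT_SERVICE_SYMMETRIC);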
diff --git a/drivers/common/qat/qat_logs.c b/drivers/common/qat/qat_logs.c
new file mode 100644
index 00000000..7a861709
--- /dev/null
+++ b/drivers/common/qat/qat_logs.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_log.h>
+#include <rte_hexdump.h>
+
+#include "qat_logs.h"
+
+int qat_gen_logtype;
+int qat_dp_logtype;
+
+int
+qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len)
+{
+ if (level > rte_log_get_global_level())
+ return 0;
+ if (level > (uint32_t)(rte_log_get_level(logtype)))
+ return 0;
+
+ rte_hexdump(rte_logs.file == NULL ? stderr : rte_logs.file,
+ title, buf, len);
+ return 0;
+}
+
+RTE_INIT(qat_pci_init_log)
+{
+ /* Non-data-path logging for pci device and all services */
+ qat_gen_logtype = rte_log_register("pmd.qat_general");
+ if (qat_gen_logtype >= 0)
+ rte_log_set_level(qat_gen_logtype, RTE_LOG_NOTICE);
+
+ /* data-path logging for all services */
+ qat_dp_logtype = rte_log_register("pmd.qat_dp");
+ if (qat_dp_logtype >= 0)
+ rte_log_set_level(qat_dp_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/common/qat/qat_logs.h b/drivers/common/qat/qat_logs.h
new file mode 100644
index 00000000..4baea12c
--- /dev/null
+++ b/drivers/common/qat/qat_logs.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_LOGS_H_
+#define _QAT_LOGS_H_
+
+extern int qat_gen_logtype;
+extern int qat_dp_logtype;
+
+#define QAT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, qat_gen_logtype, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define QAT_DP_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, qat_dp_logtype, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define QAT_DP_HEXDUMP_LOG(level, title, buf, len) \
+ qat_hexdump_log(RTE_LOG_ ## level, qat_dp_logtype, title, buf, len)
+
+/**
+ * qat_hexdump_log - Dump out memory in a special hex dump format.
+ *
+ * Dump out the message buffer in a special hex dump output format with
+ * characters printed for each line of 16 hex values. The message is sent
+ * to the stream defined by rte_logs.file, or to stderr if rte_logs.file
+ * is undefined.
+ */
+int
+qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len);
+
+#endif /* _QAT_LOGS_H_ */
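
Typical usage of these macros, assuming the including file already pulls in rte_log.h and that the names used here (qat_dev, nb_ops, req_msg, queue) exist in the caller:

    QAT_LOG(INFO, "device %s ready", qat_dev->name);
    QAT_DP_LOG(DEBUG, "enqueued %u ops", nb_ops);
    QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", req_msg, queue->msg_size);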
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
new file mode 100644
index 00000000..7ca7a45e
--- /dev/null
+++ b/drivers/common/qat/qat_qp.c
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_prefetch.h>
+
+#include "qat_logs.h"
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "qat_sym.h"
+#include "qat_comp.h"
+#include "adf_transport_access_macros.h"
+
+
+#define ADF_MAX_DESC 4096
+#define ADF_MIN_DESC 128
+
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+ (ADF_ARB_REG_SLOT * index), value)
+
+__extension__
+const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
+ [ADF_MAX_QPS_ON_ANY_SERVICE] = {
+ /* queue pairs which provide an asymmetric crypto service */
+ [QAT_SERVICE_ASYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 0,
+ .rx_ring_num = 8,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+
+ }, {
+ .service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 1,
+ .rx_ring_num = 9,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a symmetric crypto service */
+ [QAT_SERVICE_SYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 2,
+ .rx_ring_num = 10,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ },
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 3,
+ .rx_ring_num = 11,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a compression service */
+ [QAT_SERVICE_COMPRESSION] = {
+ {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 6,
+ .rx_ring_num = 14,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }, {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 7,
+ .rx_ring_num = 15,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ }
+};
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes);
+static void qat_queue_delete(struct qat_queue *queue);
+static int qat_queue_create(struct qat_pci_device *qat_dev,
+ struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *queue_size_for_csr);
+static void adf_configure_queues(struct qat_qp *queue);
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock);
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock);
+
+
+int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+ enum qat_service_type service)
+{
+ int i, count;
+
+ for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
+ if (qp_hw_data[i].service_type == service)
+ count++;
+ return count;
+}
+
+static const struct rte_memzone *
+queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+ if (mz != NULL) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ QAT_LOG(DEBUG, "re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+
+ QAT_LOG(ERR, "Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
+int qat_qp_setup(struct qat_pci_device *qat_dev,
+ struct qat_qp **qp_addr,
+ uint16_t queue_pair_id,
+ struct qat_qp_config *qat_qp_conf)
+{
+ struct qat_qp *qp;
+ struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+ char op_cookie_pool_name[RTE_RING_NAMESIZE];
+ uint32_t i;
+
+ QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
+ queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);
+
+ if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
+ (qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
+ QAT_LOG(ERR, "Can't create qp for %u descriptors",
+ qat_qp_conf->nb_descriptors);
+ return -EINVAL;
+ }
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ QAT_LOG(ERR, "Could not find VF config space "
+ "(UIO driver attached?).");
+ return -EINVAL;
+ }
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc("qat PMD qp metadata",
+ sizeof(*qp), RTE_CACHE_LINE_SIZE);
+ if (qp == NULL) {
+ QAT_LOG(ERR, "Failed to alloc mem for qp struct");
+ return -ENOMEM;
+ }
+ qp->nb_descriptors = qat_qp_conf->nb_descriptors;
+ qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
+ qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
+ RTE_CACHE_LINE_SIZE);
+ if (qp->op_cookies == NULL) {
+ QAT_LOG(ERR, "Failed to alloc mem for cookie");
+ rte_free(qp);
+ return -ENOMEM;
+ }
+
+ qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
+ qp->inflights16 = 0;
+
+ if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
+ ADF_RING_DIR_TX) != 0) {
+ QAT_LOG(ERR, "Tx queue create failed "
+ "queue_pair_id=%u", queue_pair_id);
+ goto create_err;
+ }
+
+ if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
+ ADF_RING_DIR_RX) != 0) {
+ QAT_LOG(ERR, "Rx queue create failed "
+ "queue_pair_id=%hu", queue_pair_id);
+ qat_queue_delete(&(qp->tx_q));
+ goto create_err;
+ }
+
+ adf_configure_queues(qp);
+ adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
+ &qat_dev->arb_csr_lock);
+
+ snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
+ "%s%d_cookies_%s_qp%hu",
+ pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+ qat_qp_conf->service_str, queue_pair_id);
+
+ QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
+ qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+ if (qp->op_cookie_pool == NULL)
+ qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
+ qp->nb_descriptors,
+ qat_qp_conf->cookie_size, 64, 0,
+ NULL, NULL, NULL, NULL, qat_qp_conf->socket_id,
+ 0);
+ if (!qp->op_cookie_pool) {
+ QAT_LOG(ERR, "QAT PMD Cannot create"
+ " op mempool");
+ goto create_err;
+ }
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+ if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
+ QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
+ goto create_err;
+ }
+ }
+
+ qp->qat_dev_gen = qat_dev->qat_dev_gen;
+ qp->build_request = qat_qp_conf->build_request;
+ qp->service_type = qat_qp_conf->hw->service_type;
+ qp->qat_dev = qat_dev;
+
+ QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
+ queue_pair_id, op_cookie_pool_name);
+
+ *qp_addr = qp;
+ return 0;
+
+create_err:
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+ rte_free(qp->op_cookies);
+ rte_free(qp);
+ return -EFAULT;
+}
+
+int qat_qp_release(struct qat_qp **qp_addr)
+{
+ struct qat_qp *qp = *qp_addr;
+ uint32_t i;
+
+ if (qp == NULL) {
+ QAT_LOG(DEBUG, "qp already freed");
+ return 0;
+ }
+
+ QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
+ qp->qat_dev->qat_dev_id);
+
+ /* Don't free memory if there are still responses to be processed */
+ if (qp->inflights16 == 0) {
+ qat_queue_delete(&(qp->tx_q));
+ qat_queue_delete(&(qp->rx_q));
+ } else {
+ return -EAGAIN;
+ }
+
+ adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
+ &qp->qat_dev->arb_csr_lock);
+
+ for (i = 0; i < qp->nb_descriptors; i++)
+ rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
+
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+
+ rte_free(qp->op_cookies);
+ rte_free(qp);
+ *qp_addr = NULL;
+ return 0;
+}
+
+
+static void qat_queue_delete(struct qat_queue *queue)
+{
+ const struct rte_memzone *mz;
+ int status = 0;
+
+ if (queue == NULL) {
+ QAT_LOG(DEBUG, "Invalid queue");
+ return;
+ }
+ QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
+ queue->hw_queue_number, queue->memz_name);
+
+ mz = rte_memzone_lookup(queue->memz_name);
+ if (mz != NULL) {
+ /* Write an unused pattern to the queue memory. */
+ memset(queue->base_addr, 0x7F, queue->queue_size);
+ status = rte_memzone_free(mz);
+ if (status != 0)
+ QAT_LOG(ERR, "Error %d on freeing queue %s",
+ status, queue->memz_name);
+ } else {
+ QAT_LOG(DEBUG, "queue %s doesn't exist",
+ queue->memz_name);
+ }
+}
+
+static int
+qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
+ struct qat_qp_config *qp_conf, uint8_t dir)
+{
+ uint64_t queue_base;
+ void *io_addr;
+ const struct rte_memzone *qp_mz;
+ struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+ int ret = 0;
+ uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
+ qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
+ uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size);
+
+ queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
+ queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
+ qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);
+
+ if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+ QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a memzone for the queue - create a unique name.
+ */
+ snprintf(queue->memz_name, sizeof(queue->memz_name),
+ "%s_%d_%s_%s_%d_%d",
+ pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+ qp_conf->service_str, "qp_mem",
+ queue->hw_bundle_number, queue->hw_queue_number);
+ qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+ qp_conf->socket_id);
+ if (qp_mz == NULL) {
+ QAT_LOG(ERR, "Failed to allocate ring memzone");
+ return -ENOMEM;
+ }
+
+ queue->base_addr = (char *)qp_mz->addr;
+ queue->base_phys_addr = qp_mz->iova;
+ if (qat_qp_check_queue_alignment(queue->base_phys_addr,
+ queue_size_bytes)) {
+ QAT_LOG(ERR, "Invalid alignment on queue create "
+ " 0x%"PRIx64"\n",
+ queue->base_phys_addr);
+ ret = -EFAULT;
+ goto queue_create_err;
+ }
+
+ if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
+ &(queue->queue_size)) != 0) {
+ QAT_LOG(ERR, "Invalid num inflights");
+ ret = -EINVAL;
+ goto queue_create_err;
+ }
+
+ queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
+ ADF_BYTES_TO_MSG_SIZE(desc_size));
+ queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
+
+ if (queue->max_inflights < 2) {
+ QAT_LOG(ERR, "Invalid num inflights");
+ ret = -EINVAL;
+ goto queue_create_err;
+ }
+ queue->head = 0;
+ queue->tail = 0;
+ queue->msg_size = desc_size;
+
+ /*
+ * Write an unused pattern to the queue memory.
+ */
+ memset(queue->base_addr, 0x7F, queue_size_bytes);
+
+ queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
+ queue->queue_size);
+
+ io_addr = pci_dev->mem_resource[0].addr;
+
+ WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_base);
+
+ QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
+ " nb msgs %u, msg_size %u, max_inflights %u modulo mask %u",
+ queue->memz_name,
+ queue->queue_size, queue_size_bytes,
+ qp_conf->nb_descriptors, desc_size,
+ queue->max_inflights, queue->modulo_mask);
+
+ return 0;
+
+queue_create_err:
+ rte_memzone_free(qp_mz);
+ return ret;
+}
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes)
+{
+ if (((queue_size_bytes - 1) & phys_addr) != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *p_queue_size_for_csr)
+{
+ uint8_t i = ADF_MIN_RING_SIZE;
+
+ for (; i <= ADF_MAX_RING_SIZE; i++)
+ if ((msg_size * msg_num) ==
+ (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
+ *p_queue_size_for_csr = i;
+ return 0;
+ }
+ QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
+ return -EINVAL;
+}
+
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ rte_spinlock_lock(lock);
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value |= (0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+ rte_spinlock_unlock(lock);
+}
+
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ rte_spinlock_lock(lock);
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value &= ~(0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+ rte_spinlock_unlock(lock);
+}
+
+static void adf_configure_queues(struct qat_qp *qp)
+{
+ uint32_t queue_config;
+ struct qat_queue *queue = &qp->tx_q;
+
+ queue_config = BUILD_RING_CONFIG(queue->queue_size);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+
+ queue = &qp->rx_q;
+ queue_config =
+ BUILD_RESP_RING_CONFIG(queue->queue_size,
+ ADF_RING_NEAR_WATERMARK_512,
+ ADF_RING_NEAR_WATERMARK_0);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+}
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
+{
+ return data & modulo_mask;
+}
+
+static inline void
+txq_write_tail(struct qat_qp *qp, struct qat_queue *q)
+{
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, q->tail);
+ q->nb_pending_requests = 0;
+ q->csr_tail = q->tail;
+}
+
+static inline
+void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+{
+ uint32_t old_head, new_head;
+ uint32_t max_head;
+
+ old_head = q->csr_head;
+ new_head = q->head;
+ max_head = qp->nb_descriptors * q->msg_size;
+
+ /* write out free descriptors */
+ void *cur_desc = (uint8_t *)q->base_addr + old_head;
+
+ if (new_head < old_head) {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
+ memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
+ } else {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
+ }
+ q->nb_processed_responses = 0;
+ q->csr_head = new_head;
+
+ /* write current head to CSR */
+ WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, new_head);
+}
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register int ret;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+ int overflow;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ tmp_qp->inflights16 += nb_ops;
+ overflow = tmp_qp->inflights16 - queue->max_inflights;
+ if (overflow > 0) {
+ tmp_qp->inflights16 -= overflow;
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+
+ while (nb_ops_sent != nb_ops_possible) {
+ ret = tmp_qp->build_request(*ops, base_addr + tail,
+ tmp_qp->op_cookies[tail / queue->msg_size],
+ tmp_qp->qat_dev_gen);
+ if (ret != 0) {
+ tmp_qp->stats.enqueue_err_count++;
+ /*
+ * This message cannot be enqueued;
+ * drop the inflight count by the number of ops not sent
+ */
+ tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+
+ tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
+ ops++;
+ nb_ops_sent++;
+ }
+kick_tail:
+ queue->tail = tail;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ queue->nb_pending_requests += nb_ops_sent;
+ if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
+ queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
+ txq_write_tail(tmp_qp, queue);
+ }
+ return nb_ops_sent;
+}
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ struct qat_queue *rx_queue, *tx_queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ uint32_t head;
+ uint32_t resp_counter = 0;
+ uint8_t *resp_msg;
+
+ rx_queue = &(tmp_qp->rx_q);
+ tx_queue = &(tmp_qp->tx_q);
+ head = rx_queue->head;
+ resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
+
+ while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
+ resp_counter != nb_ops) {
+
+ if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
+ qat_sym_process_response(ops, resp_msg);
+ else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
+ qat_comp_process_response(ops, resp_msg);
+
+ head = adf_modulo(head + rx_queue->msg_size,
+ rx_queue->modulo_mask);
+
+ resp_msg = (uint8_t *)rx_queue->base_addr + head;
+ ops++;
+ resp_counter++;
+ }
+ if (resp_counter > 0) {
+ rx_queue->head = head;
+ tmp_qp->stats.dequeued_count += resp_counter;
+ rx_queue->nb_processed_responses += resp_counter;
+ tmp_qp->inflights16 -= resp_counter;
+
+ if (rx_queue->nb_processed_responses >
+ QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(tmp_qp, rx_queue);
+ }
+ /* also check if tail needs to be advanced */
+ if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
+ tx_queue->tail != tx_queue->csr_tail) {
+ txq_write_tail(tmp_qp, tx_queue);
+ }
+ return resp_counter;
+}
+
+__attribute__((weak)) int
+qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+{
+ return 0;
+}
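
A note on the ring arithmetic above: ring sizes are powers of two (qat_qp_check_queue_alignment() depends on that when it masks the physical address with queue_size_bytes - 1), so head/tail advance is a mask rather than a divide, and modulo_mask works out to the ring size in bytes minus one. A worked sketch, assuming a 4096-descriptor ring of 128-byte requests:

    uint32_t ring_bytes  = 4096 * 128;      /* 512 KiB, a power of two */
    uint32_t modulo_mask = ring_bytes - 1;  /* 0x7ffff */
    uint32_t tail = ring_bytes - 128;       /* last slot in the ring */

    tail = (tail + 128) & modulo_mask;      /* wraps back to 0 */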
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
new file mode 100644
index 00000000..69f8a613
--- /dev/null
+++ b/drivers/common/qat/qat_qp.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_QP_H_
+#define _QAT_QP_H_
+
+#include "qat_common.h"
+#include "adf_transport_access_macros.h"
+
+struct qat_pci_device;
+
+#define QAT_CSR_HEAD_WRITE_THRESH 32U
+/* number of responses to accumulate before writing head CSR */
+#define QAT_CSR_TAIL_WRITE_THRESH 32U
+/* number of requests to accumulate before writing tail CSR */
+#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
+/* number of inflights below which no tail write coalescing should occur */
+
+typedef int (*build_request_t)(void *op,
+ uint8_t *req, void *op_cookie,
+ enum qat_device_gen qat_dev_gen);
+/**< Build a request from an op. */
+
+/**
+ * Structure with the hardware ring data needed to create a queue pair.
+ */
+struct qat_qp_hw_data {
+ enum qat_service_type service_type;
+ uint8_t hw_bundle_num;
+ uint8_t tx_ring_num;
+ uint8_t rx_ring_num;
+ uint16_t tx_msg_size;
+ uint16_t rx_msg_size;
+};
+/**
+ * Structure with the configuration data needed to create a queue pair.
+ */
+struct qat_qp_config {
+ const struct qat_qp_hw_data *hw;
+ uint32_t nb_descriptors;
+ uint32_t cookie_size;
+ int socket_id;
+ build_request_t build_request;
+ const char *service_str;
+};
+
+/**
+ * Structure associated with each queue.
+ */
+struct qat_queue {
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+ void *base_addr; /* Base address */
+ rte_iova_t base_phys_addr; /* Queue physical address */
+ uint32_t head; /* Shadow copy of the head */
+ uint32_t tail; /* Shadow copy of the tail */
+ uint32_t modulo_mask;
+ uint32_t msg_size;
+ uint16_t max_inflights;
+ uint32_t queue_size;
+ uint8_t hw_bundle_number;
+ uint8_t hw_queue_number;
+ /* HW queue aka ring offset on bundle */
+ uint32_t csr_head; /* last written head value */
+ uint32_t csr_tail; /* last written tail value */
+ uint16_t nb_processed_responses;
+ /* number of responses processed since last CSR head write */
+ uint16_t nb_pending_requests;
+ /* number of requests pending since last CSR tail write */
+};
+
+struct qat_qp {
+ void *mmap_bar_addr;
+ uint16_t inflights16;
+ struct qat_queue tx_q;
+ struct qat_queue rx_q;
+ struct qat_common_stats stats;
+ struct rte_mempool *op_cookie_pool;
+ void **op_cookies;
+ uint32_t nb_descriptors;
+ enum qat_device_gen qat_dev_gen;
+ build_request_t build_request;
+ enum qat_service_type service_type;
+ struct qat_pci_device *qat_dev;
+ /**< qat device this qp is on */
+} __rte_cache_aligned;
+
+extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+int
+qat_qp_release(struct qat_qp **qp_addr);
+
+int
+qat_qp_setup(struct qat_pci_device *qat_dev,
+ struct qat_qp **qp_addr, uint16_t queue_pair_id,
+ struct qat_qp_config *qat_qp_conf);
+
+int
+qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+ enum qat_service_type service);
+
+/* Needed for weak function */
+int
+qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused);
+
+#endif /* _QAT_QP_H_ */
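
Tying the pieces together, a service PMD fills a qat_qp_config from the generation table and hands it to qat_qp_setup(). A hedged sketch (the build-request callback, cookie size and service string are hypothetical service-specific values):

    struct qat_qp *qp = NULL;
    struct qat_qp_config cfg = {
            .hw = &qat_gen1_qps[QAT_SERVICE_SYMMETRIC][0],
            .nb_descriptors = 4096,
            .cookie_size = 64,                 /* placeholder */
            .socket_id = SOCKET_ID_ANY,
            .build_request = my_build_request, /* hypothetical callback */
            .service_str = "sym",
    };

    if (qat_qp_setup(qat_dev, &qp, 0, &cfg) != 0)
            QAT_LOG(ERR, "qp setup failed");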
diff --git a/drivers/compress/Makefile b/drivers/compress/Makefile
index 592497f5..286ea6ee 100644
--- a/drivers/compress/Makefile
+++ b/drivers/compress/Makefile
@@ -4,5 +4,7 @@
include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += octeontx
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/compress/isal/isal_compress_pmd.c b/drivers/compress/isal/isal_compress_pmd.c
index 0f025a3b..e943336b 100644
--- a/drivers/compress/isal/isal_compress_pmd.c
+++ b/drivers/compress/isal/isal_compress_pmd.c
@@ -188,6 +188,179 @@ isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform,
return 0;
}
+/* Compression using chained mbufs for input/output data */
+static int
+chained_mbuf_compression(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret;
+ uint32_t remaining_offset;
+ uint32_t remaining_data = op->src.length;
+ struct rte_mbuf *src = op->m_src;
+ struct rte_mbuf *dst = op->m_dst;
+
+ /* check for source/destination offset passing multiple segments
+ * and point compression stream to input/output buffer.
+ */
+ remaining_offset = op->src.offset;
+ while (remaining_offset >= src->data_len) {
+ remaining_offset -= src->data_len;
+ src = src->next;
+ }
+ qp->stream->avail_in = RTE_MIN(src->data_len - remaining_offset,
+ op->src.length);
+ qp->stream->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *,
+ remaining_offset);
+
+ remaining_offset = op->dst.offset;
+ while (remaining_offset >= dst->data_len) {
+ remaining_offset -= dst->data_len;
+ dst = dst->next;
+ }
+ qp->stream->avail_out = dst->data_len - remaining_offset;
+ qp->stream->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *,
+ remaining_offset);
+
+ if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination buffer\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ while (qp->stream->internal_state.state != ZSTATE_END) {
+ /* Last segment of data */
+ if (remaining_data <= src->data_len)
+ qp->stream->end_of_stream = 1;
+
+ /* Execute compression operation */
+ ret = isal_deflate(qp->stream);
+
+ remaining_data = op->src.length - qp->stream->total_in;
+
+ if (ret != COMP_OK) {
+ ISAL_PMD_LOG(ERR, "Compression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ if (qp->stream->avail_in == 0 &&
+ qp->stream->total_in != op->src.length) {
+ if (src->next != NULL) {
+ src = src->next;
+ qp->stream->next_in =
+ rte_pktmbuf_mtod(src, uint8_t *);
+ qp->stream->avail_in =
+ RTE_MIN(remaining_data, src->data_len);
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough input buffer segments\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+ }
+
+ if (qp->stream->avail_out == 0 &&
+ qp->stream->internal_state.state != ZSTATE_END) {
+ if (dst->next != NULL) {
+ dst = dst->next;
+ qp->stream->next_out =
+ rte_pktmbuf_mtod(dst, uint8_t *);
+ qp->stream->avail_out = dst->data_len;
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough output buffer segments\n");
+ op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Decompression using chained mbufs for input/output data */
+static int
+chained_mbuf_decompression(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret;
+ uint32_t consumed_data, src_remaining_offset, dst_remaining_offset;
+ uint32_t remaining_data = op->src.length;
+ struct rte_mbuf *src = op->m_src;
+ struct rte_mbuf *dst = op->m_dst;
+
+ /* check for offset passing multiple segments
+ * and point decompression state to input/output buffer
+ */
+ src_remaining_offset = op->src.offset;
+ while (src_remaining_offset >= src->data_len) {
+ src_remaining_offset -= src->data_len;
+ src = src->next;
+ }
+ qp->state->avail_in = RTE_MIN(src->data_len - src_remaining_offset,
+ op->src.length);
+ qp->state->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *,
+ src_remaining_offset);
+
+ dst_remaining_offset = op->dst.offset;
+ while (dst_remaining_offset >= dst->data_len) {
+ dst_remaining_offset -= dst->data_len;
+ dst = dst->next;
+ }
+ qp->state->avail_out = dst->data_len - dst_remaining_offset;
+ qp->state->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *,
+ dst_remaining_offset);
+
+ while (qp->state->block_state != ISAL_BLOCK_FINISH) {
+
+ ret = isal_inflate(qp->state);
+
+ /* Check for first segment, offset needs to be accounted for */
+ if (remaining_data == op->src.length) {
+ consumed_data = src->data_len - qp->state->avail_in -
+ src_remaining_offset;
+ } else {
+ consumed_data = src->data_len - qp->state->avail_in;
+ }
+
+ op->consumed += consumed_data;
+ remaining_data -= consumed_data;
+
+ if (ret != ISAL_DECOMP_OK) {
+ ISAL_PMD_LOG(ERR, "Decompression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ if (qp->state->avail_in == 0
+ && op->consumed != op->src.length) {
+ if (src->next != NULL) {
+ src = src->next;
+ qp->state->next_in =
+ rte_pktmbuf_mtod(src, uint8_t *);
+ qp->state->avail_in =
+ RTE_MIN(remaining_data, src->data_len);
+ }
+ }
+
+ if (qp->state->avail_out == 0 &&
+ qp->state->block_state != ISAL_BLOCK_FINISH) {
+ if (dst->next != NULL) {
+ dst = dst->next;
+ qp->state->next_out =
+ rte_pktmbuf_mtod(dst, uint8_t *);
+ qp->state->avail_out = dst->data_len;
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough output buffer segments\n");
+ op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
/* Stateless Compression Function */
static int
process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
@@ -207,23 +380,10 @@ process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
/* Stateless operation, input will be consumed in one go */
qp->stream->flush = NO_FLUSH;
- /* set op level & intermediate level buffer */
+ /* set compression level & intermediate level buffer size */
qp->stream->level = priv_xform->compress.level;
qp->stream->level_buf_size = priv_xform->level_buffer_size;
- /* Point compression stream structure to input/output buffers */
- qp->stream->avail_in = op->src.length;
- qp->stream->next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
- qp->stream->avail_out = op->m_dst->data_len;
- qp->stream->next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
- qp->stream->end_of_stream = 1; /* All input consumed in one go */
-
- if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
- ISAL_PMD_LOG(ERR, "Invalid source or destination buffers\n");
- op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
- return -1;
- }
-
/* Set op huffman code */
if (priv_xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
isal_deflate_set_hufftables(qp->stream, NULL,
@@ -238,30 +398,70 @@ process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
isal_deflate_set_hufftables(qp->stream, NULL,
IGZIP_HUFFTABLE_DEFAULT);
- /* Execute compression operation */
- ret = isal_deflate_stateless(qp->stream);
-
- /* Check that output buffer did not run out of space */
- if (ret == STATELESS_OVERFLOW) {
- ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
- op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
- return ret;
+ if (op->m_src->pkt_len < (op->src.length + op->src.offset)) {
+ ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
}
- /* Check that input buffer has been fully consumed */
- if (qp->stream->avail_in != (uint32_t)0) {
- ISAL_PMD_LOG(ERR, "Input buffer could not be read entirely\n");
- op->status = RTE_COMP_OP_STATUS_ERROR;
+ if (op->dst.offset >= op->m_dst->pkt_len) {
+ ISAL_PMD_LOG(ERR, "Output mbuf(s) not big enough"
+ " for offset provided.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
return -1;
}
- if (ret != COMP_OK) {
- op->status = RTE_COMP_OP_STATUS_ERROR;
- return ret;
- }
+ /* Chained mbufs */
+ if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) {
+ ret = chained_mbuf_compression(op, qp);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Linear buffer */
+ qp->stream->end_of_stream = 1; /* All input consumed in one go */
+ /* Point compression stream to input buffer */
+ qp->stream->avail_in = op->src.length;
+ qp->stream->next_in = rte_pktmbuf_mtod_offset(op->m_src,
+ uint8_t *, op->src.offset);
+
+ /* Point compression stream to output buffer */
+ qp->stream->avail_out = op->m_dst->data_len - op->dst.offset;
+ qp->stream->next_out = rte_pktmbuf_mtod_offset(op->m_dst,
+ uint8_t *, op->dst.offset);
+
+ if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination"
+ " buffers\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
- op->consumed = qp->stream->total_in;
- op->produced = qp->stream->total_out;
+ /* Execute compression operation */
+ ret = isal_deflate_stateless(qp->stream);
+
+ /* Check that output buffer did not run out of space */
+ if (ret == STATELESS_OVERFLOW) {
+ ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return ret;
+ }
+
+ /* Check that input buffer has been fully consumed */
+ if (qp->stream->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read"
+ " entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ if (ret != COMP_OK) {
+ ISAL_PMD_LOG(ERR, "Compression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+ }
+ op->consumed = qp->stream->total_in;
+ op->produced = qp->stream->total_out;
return ret;
}
@@ -277,43 +477,69 @@ process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp)
/* Initialize decompression state */
isal_inflate_init(qp->state);
- /* Point decompression state structure to input/output buffers */
- qp->state->avail_in = op->src.length;
- qp->state->next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
- qp->state->avail_out = op->m_dst->data_len;
- qp->state->next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
+ if (op->m_src->pkt_len < (op->src.length + op->src.offset)) {
+ ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
- if (unlikely(!qp->state->next_in || !qp->state->next_out)) {
- ISAL_PMD_LOG(ERR, "Invalid source or destination buffers\n");
+ if (op->dst.offset >= op->m_dst->pkt_len) {
+ ISAL_PMD_LOG(ERR, "Output mbuf not big enough for "
+ "offset provided.\n");
op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
return -1;
}
- /* Execute decompression operation */
- ret = isal_inflate_stateless(qp->state);
+ /* Chained mbufs */
+ if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) {
+ ret = chained_mbuf_decompression(op, qp);
+ if (ret != 0)
+ return ret;
+ } else {
+ /* Linear buffer */
+ /* Point decompression state to input buffer */
+ qp->state->avail_in = op->src.length;
+ qp->state->next_in = rte_pktmbuf_mtod_offset(op->m_src,
+ uint8_t *, op->src.offset);
+
+ /* Point decompression state to output buffer */
+ qp->state->avail_out = op->m_dst->data_len - op->dst.offset;
+ qp->state->next_out = rte_pktmbuf_mtod_offset(op->m_dst,
+ uint8_t *, op->dst.offset);
+
+ if (unlikely(!qp->state->next_in || !qp->state->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination"
+ " buffers\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
- if (ret == ISAL_OUT_OVERFLOW) {
- ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
- op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
- return ret;
- }
+ /* Execute decompression operation */
+ ret = isal_inflate_stateless(qp->state);
- /* Check that input buffer has been fully consumed */
- if (qp->state->avail_in != (uint32_t)0) {
- ISAL_PMD_LOG(ERR, "Input buffer could not be read entirely\n");
- op->status = RTE_COMP_OP_STATUS_ERROR;
- return -1;
- }
+ if (ret == ISAL_OUT_OVERFLOW) {
+ ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return ret;
+ }
- if (ret != ISAL_DECOMP_OK) {
- op->status = RTE_COMP_OP_STATUS_ERROR;
- return ret;
- }
+ /* Check that input buffer has been fully consumed */
+ if (qp->state->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read"
+ " entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
- op->consumed = op->src.length - qp->state->avail_in;
- op->produced = qp->state->total_out;
+ if (ret != ISAL_DECOMP_OK) {
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+ op->consumed = op->src.length - qp->state->avail_in;
+ }
+ op->produced = qp->state->total_out;
-return ret;
+ return ret;
}
/* Process compression/decompression operation */
@@ -460,12 +686,9 @@ RTE_PMD_REGISTER_VDEV(COMPDEV_NAME_ISAL_PMD, compdev_isal_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(COMPDEV_NAME_ISAL_PMD,
"socket_id=<int>");
-RTE_INIT(isal_init_log);
-
-static void
-isal_init_log(void)
+RTE_INIT(isal_init_log)
{
- isal_logtype_driver = rte_log_register("comp_isal");
+ isal_logtype_driver = rte_log_register("pmd.compress.isal");
if (isal_logtype_driver >= 0)
rte_log_set_level(isal_logtype_driver, RTE_LOG_INFO);
}
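
Both chained-mbuf paths above open with the same idiom: walk the segment list until the packet-level offset falls inside one segment, then address that segment directly. Factored out as a helper for illustration only (the driver inlines this logic; the earlier pkt_len checks guarantee the offset lies within the chain):

    static struct rte_mbuf *
    seek_offset(struct rte_mbuf *m, uint32_t *offset)
    {
            /* on return, *offset is relative to the returned segment */
            while (*offset >= m->data_len) {
                    *offset -= m->data_len;
                    m = m->next;
            }
            return m;
    }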
diff --git a/drivers/compress/isal/isal_compress_pmd_ops.c b/drivers/compress/isal/isal_compress_pmd_ops.c
index 970a0413..41cade87 100644
--- a/drivers/compress/isal/isal_compress_pmd_ops.c
+++ b/drivers/compress/isal/isal_compress_pmd_ops.c
@@ -12,7 +12,12 @@
static const struct rte_compressdev_capabilities isal_pmd_capabilities[] = {
{
.algo = RTE_COMP_ALGO_DEFLATE,
- .comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
+ .comp_feature_flags = RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC,
.window_size = {
.min = 15,
.max = 15,
@@ -165,8 +170,11 @@ isal_comp_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
if (qp->state != NULL)
rte_free(qp->state);
- if (dev->data->queue_pairs[qp_id] != NULL)
- rte_free(dev->data->queue_pairs[qp_id]);
+ if (qp->processed_pkts != NULL)
+ rte_ring_free(qp->processed_pkts);
+
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
return 0;
}
diff --git a/drivers/compress/meson.build b/drivers/compress/meson.build
index fb136e1b..817ef3be 100644
--- a/drivers/compress/meson.build
+++ b/drivers/compress/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Intel Corporation
-drivers = ['isal']
+drivers = ['isal', 'octeontx', 'qat', 'zlib']
std_deps = ['compressdev'] # compressdev pulls in all other needed deps
config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
diff --git a/drivers/compress/octeontx/Makefile b/drivers/compress/octeontx/Makefile
new file mode 100644
index 00000000..f34424c8
--- /dev/null
+++ b/drivers/compress/octeontx/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_octeontx_zip.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -I$(RTE_SDK)/drivers/compress/octeontx/include
+
+# external library include paths
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += otx_zip_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += otx_zip.c
+
+# versioning export map
+EXPORT_MAP := rte_pmd_octeontx_compress_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/compress/octeontx/include/zip_regs.h b/drivers/compress/octeontx/include/zip_regs.h
new file mode 100644
index 00000000..1e74db43
--- /dev/null
+++ b/drivers/compress/octeontx/include/zip_regs.h
@@ -0,0 +1,711 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _RTE_OCTEONTX_ZIP_REGS_H_
+#define _RTE_OCTEONTX_ZIP_REGS_H_
+
+
+/**
+ * Enumeration zip_cc
+ *
+ * ZIP compression coding Enumeration
+ * Enumerates ZIP_INST_S[CC].
+ */
+enum zip_cc {
+ ZIP_CC_DEFAULT = 0,
+ ZIP_CC_DYN_HUFF,
+ ZIP_CC_FIXED_HUFF,
+ ZIP_CC_LZS
+};
+
+/**
+ * Register (NCB) zip_vq#_ena
+ *
+ * ZIP VF Queue Enable Register
+ * If a queue is disabled, ZIP CTL stops fetching instructions from the queue.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_vqx_ena_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1;
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_vqx_ena_s cn; */
+} zip_vqx_ena_t;
+
+/**
+ * Register (NCB) zip_vq#_sbuf_addr
+ *
+ * ZIP VF Queue Starting Buffer Address Registers
+ * These registers set the buffer parameters for the instruction queues.
+ * When quiescent (i.e. the outstanding doorbell count is 0), it is safe
+ * to rewrite this register to effectively reset the command buffer
+ * state machine.
+ * These registers must be programmed after software programs the
+ * corresponding ZIP_QUE()_SBUF_CTL.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_vqx_sbuf_addr_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptr : 42;
+ uint64_t off : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t off : 7;
+ uint64_t ptr : 42;
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_vqx_sbuf_addr_s cn; */
+} zip_vqx_sbuf_addr_t;
+
+/**
+ * Register (NCB) zip_que#_doorbell
+ *
+ * ZIP Queue Doorbell Registers
+ * Doorbells for the ZIP instruction queues.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_quex_doorbell_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t dbell_cnt : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t dbell_cnt : 20;
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_quex_doorbell_s cn; */
+} zip_quex_doorbell_t;
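+
+/*
+ * Illustrative (non-normative) doorbell write, assuming dbell_reg is
+ * the mmap'd ZIP_QUE(x)_DOORBELL register and n instructions were just
+ * appended to the queue:
+ *
+ *	zip_quex_doorbell_t db = { .u = 0 };
+ *	db.s.dbell_cnt = n;
+ *	*(volatile uint64_t *)dbell_reg = db.u;
+ */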
+
+/**
+ * Structure zip_nptr_s
+ *
+ * ZIP Instruction Next-Chunk-Buffer Pointer (NPTR) Structure
+ * This structure is used to chain all the ZIP instruction buffers
+ * together. ZIP instruction buffers are managed
+ * (allocated and released) by software.
+ */
+union zip_nptr_s {
+ uint64_t u;
+ struct zip_nptr_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_nptr_s_s cn83xx; */
+};
+
+/**
+ * generic ptr address
+ */
+union zip_zptr_addr_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u;
+ /** generic ptr address */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/**
+ * generic ptr ctl
+ */
+union zip_zptr_ctl_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u;
+ /** generic ptr ctl */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ uint64_t reserved_112_127 : 16;
+ uint64_t length : 16;
+ uint64_t reserved_67_95 : 29;
+ uint64_t fw : 1;
+ uint64_t nc : 1;
+ uint64_t data_be : 1;
+#else /* Word 1 - Little Endian */
+ uint64_t data_be : 1;
+ uint64_t nc : 1;
+ uint64_t fw : 1;
+ uint64_t reserved_67_95 : 29;
+ uint64_t length : 16;
+ uint64_t reserved_112_127 : 16;
+#endif /* Word 1 - End */
+ } s;
+
+};
+
+/**
+ * Structure zip_inst_s
+ *
+ * ZIP Instruction Structure
+ * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15
+ * within the structure).
+ */
+union zip_inst_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[16];
+ /** ZIP Instruction Structure */
+ struct zip_inst_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Done interrupt */
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_56_62 : 7;
+ /** Total output length */
+ uint64_t totaloutputlength : 24;
+ /** reserved */
+ uint64_t reserved_27_31 : 5;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** HASH IV */
+ uint64_t iv : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** Hash more-in-file */
+ uint64_t hmif : 1;
+ /** Hash Algorithm and enable */
+ uint64_t halg : 3;
+ /** Sync flush */
+ uint64_t sf : 1;
+ /** Compression speed/storage */
+ uint64_t ss : 2;
+ /** Compression coding */
+ uint64_t cc : 2;
+ /** End of input data */
+ uint64_t ef : 1;
+ /** Beginning of file */
+ uint64_t bf : 1;
+ /** Comp/decomp operation */
+ uint64_t op : 2;
+ /** Data scatter */
+ uint64_t ds : 1;
+ /** Data gather */
+ uint64_t dg : 1;
+ /** History gather */
+ uint64_t hg : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t hg : 1;
+ uint64_t dg : 1;
+ uint64_t ds : 1;
+ uint64_t op : 2;
+ uint64_t bf : 1;
+ uint64_t ef : 1;
+ uint64_t cc : 2;
+ uint64_t ss : 2;
+ uint64_t sf : 1;
+ uint64_t halg : 3;
+ uint64_t hmif : 1;
+ uint64_t exbits : 7;
+ uint64_t iv : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_27_31 : 5;
+ uint64_t totaloutputlength : 24;
+ uint64_t reserved_56_62 : 7;
+ uint64_t doneint : 1;
+
+#endif /* Word 0 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** History length */
+ uint64_t historylength : 16;
+ /** reserved */
+ uint64_t reserved_96_111 : 16;
+ /** adler/crc32 checksum */
+ uint64_t adlercrc32 : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t adlercrc32 : 32;
+ uint64_t reserved_96_111 : 16;
+ uint64_t historylength : 16;
+#endif /* Word 1 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Decompression Context Pointer Address */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#else /* Word 2 - Little Endian */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#endif /* Word 2 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression Context Pointer Control */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#else /* Word 3 - Little Endian */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#endif /* Word 3 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression history pointer address */
+ union zip_zptr_addr_s his_ptr_addr;
+#else /* Word 4 - Little Endian */
+ union zip_zptr_addr_s his_ptr_addr;
+#endif /* Word 4 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression history pointer control */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#else /* Word 5 - Little Endian */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#endif /* Word 5 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Input and compression history pointer address */
+ union zip_zptr_addr_s inp_ptr_addr;
+#else /* Word 6 - Little Endian */
+ union zip_zptr_addr_s inp_ptr_addr;
+#endif /* Word 6 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Input and compression history pointer control */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#else /* Word 7 - Little Endian */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#endif /* Word 7 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Output pointer address */
+ union zip_zptr_addr_s out_ptr_addr;
+#else /* Word 8 - Little Endian */
+ union zip_zptr_addr_s out_ptr_addr;
+#endif /* Word 8 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Output pointer control */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#else /* Word 9 - Little Endian */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#endif /* Word 9 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Result pointer address */
+ union zip_zptr_addr_s res_ptr_addr;
+#else /* Word 10 - Little Endian */
+ union zip_zptr_addr_s res_ptr_addr;
+#endif /* Word 10 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Result pointer control */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#else /* Word 11 - Little Endian */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#endif /* Word 11 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 12 - Big Endian */
+ /** reserved */
+ uint64_t reserved_812_831 : 20;
+ /** SSO guest group */
+ uint64_t ggrp : 10;
+ /** SSO tag type */
+ uint64_t tt : 2;
+ /** SSO tag */
+ uint64_t tag : 32;
+#else /* Word 12 - Little Endian */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t ggrp : 10;
+ uint64_t reserved_812_831 : 20;
+#endif /* Word 12 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 13 - Big Endian */
+ /** Work queue entry pointer */
+ uint64_t wq_ptr : 64;
+#else /* Word 13 - Little Endian */
+ uint64_t wq_ptr : 64;
+#endif /* Word 13 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** reserved */
+ uint64_t reserved_896_959 : 64;
+#else /* Word 14 - Little Endian */
+ uint64_t reserved_896_959 : 64;
+#endif /* Word 14 - End */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Hash structure pointer */
+ uint64_t hash_ptr : 64;
+#else /* Word 15 - Little Endian */
+ uint64_t hash_ptr : 64;
+#endif /* Word 15 - End */
+ } /** ZIP 88xx Instruction Structure */ zip88xx;
+
+ /** ZIP Instruction Structure */
+ struct zip_inst_s_cn83xx {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Done interrupt */
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_56_62 : 7;
+ /** Total output length */
+ uint64_t totaloutputlength : 24;
+ /** reserved */
+ uint64_t reserved_27_31 : 5;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** HASH IV */
+ uint64_t iv : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** Hash more-in-file */
+ uint64_t hmif : 1;
+ /** Hash Algorithm and enable */
+ uint64_t halg : 3;
+ /** Sync flush*/
+ uint64_t sf : 1;
+ /** Compression speed/storage */
+ uint64_t ss : 2;
+ /** Compression coding */
+ uint64_t cc : 2;
+ /** End of input data */
+ uint64_t ef : 1;
+ /** Beginning of file */
+ uint64_t bf : 1;
+ /** Comp/decomp operation */
+ uint64_t op : 2;
+ /** Data scatter */
+ uint64_t ds : 1;
+ /** Data gather */
+ uint64_t dg : 1;
+ /** History gather */
+ uint64_t hg : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t hg : 1;
+ uint64_t dg : 1;
+ uint64_t ds : 1;
+ uint64_t op : 2;
+ uint64_t bf : 1;
+ uint64_t ef : 1;
+ uint64_t cc : 2;
+ uint64_t ss : 2;
+ uint64_t sf : 1;
+ uint64_t halg : 3;
+ uint64_t hmif : 1;
+ uint64_t exbits : 7;
+ uint64_t iv : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_27_31 : 5;
+ uint64_t totaloutputlength : 24;
+ uint64_t reserved_56_62 : 7;
+ uint64_t doneint : 1;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** History length */
+ uint64_t historylength : 16;
+ /** reserved */
+ uint64_t reserved_96_111 : 16;
+ /** adler/crc32 checksum*/
+ uint64_t adlercrc32 : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t adlercrc32 : 32;
+ uint64_t reserved_96_111 : 16;
+ uint64_t historylength : 16;
+#endif /* Word 1 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Decompression Context Pointer Address */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#else /* Word 2 - Little Endian */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#endif /* Word 2 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 3 - Big Endian */
+ /** Decompression Context Pointer Control */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#else /* Word 3 - Little Endian */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#endif /* Word 3 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 4 - Big Endian */
+ /** Decompression history pointer address */
+ union zip_zptr_addr_s his_ptr_addr;
+#else /* Word 4 - Little Endian */
+ union zip_zptr_addr_s his_ptr_addr;
+#endif /* Word 4 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 5 - Big Endian */
+ /** Decompression history pointer control */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#else /* Word 5 - Little Endian */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#endif /* Word 5 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 6 - Big Endian */
+ /** Input and compression history pointer address */
+ union zip_zptr_addr_s inp_ptr_addr;
+#else /* Word 6 - Little Endian */
+ union zip_zptr_addr_s inp_ptr_addr;
+#endif /* Word 6 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 7 - Big Endian */
+ /** Input and compression history pointer control */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#else /* Word 7 - Little Endian */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#endif /* Word 7 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 8 - Big Endian */
+ /** Output pointer address */
+ union zip_zptr_addr_s out_ptr_addr;
+#else /* Word 8 - Little Endian */
+ union zip_zptr_addr_s out_ptr_addr;
+#endif /* Word 8 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 9 - Big Endian */
+ /** Output pointer control */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#else /* Word 9 - Little Endian */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#endif /* Word 9 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 10 - Big Endian */
+ /** Result pointer address */
+ union zip_zptr_addr_s res_ptr_addr;
+#else /* Word 10 - Little Endian */
+ union zip_zptr_addr_s res_ptr_addr;
+#endif /* Word 10 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 11 - Big Endian */
+ /** Result pointer control */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#else /* Word 11 - Little Endian */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#endif /* Word 11 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 12 - Big Endian */
+ /** reserved */
+ uint64_t reserved_812_831 : 20;
+ /** SSO guest group */
+ uint64_t ggrp : 10;
+ /** SSO tag type */
+ uint64_t tt : 2;
+ /** SSO tag */
+ uint64_t tag : 32;
+#else /* Word 12 - Little Endian */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t ggrp : 10;
+ uint64_t reserved_812_831 : 20;
+#endif /* Word 12 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 13 - Big Endian */
+ /** Work queue entry pointer */
+ uint64_t wq_ptr : 64;
+#else /* Word 13 - Little Endian */
+ uint64_t wq_ptr : 64;
+#endif /* Word 13 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 14 - Big Endian */
+ /** reserved */
+ uint64_t reserved_896_959 : 64;
+#else /* Word 14 - Little Endian */
+ uint64_t reserved_896_959 : 64;
+#endif /* Word 14 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 15 - Big Endian */
+ /** Hash structure pointer */
+ uint64_t hash_ptr : 64;
+#else /* Word 15 - Little Endian */
+ uint64_t hash_ptr : 64;
+#endif /* Word 15 - End */
+ } s; /**< ZIP 83xx Instruction Structure */
+};
+
+/**
+ * Structure zip_zres_s
+ *
+ * ZIP Result Structure
+ * The ZIP coprocessor writes the result structure after it completes the
+ * invocation. Words 0-2 (24 bytes) carry the completion data and words
+ * 3-7 the optional hash output; each invocation of the ZIP coprocessor
+ * produces exactly one result structure.
+ */
+union zip_zres_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[8];
+ /** ZIP Result Structure */
+ struct zip_zres_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** crc32 checksum of uncompressed stream */
+ uint64_t crc32 : 32;
+ /** adler32 checksum of the uncompressed stream */
+ uint64_t adler32 : 32;
+#else /* Word 0 - Little Endian */
+ uint64_t adler32 : 32;
+ uint64_t crc32 : 32;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** Total number of bytes produced in the output stream */
+ uint64_t totalbyteswritten : 32;
+ /** Total number of bytes processed from the input stream */
+ uint64_t totalbytesread : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t totalbytesread : 32;
+ uint64_t totalbyteswritten : 32;
+#endif /* Word 1 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Total number of compressed input bits
+ * consumed to decompress all blocks in the file
+ */
+ uint64_t totalbitsprocessed : 32;
+ /** Done interrupt*/
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_155_158 : 4;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** reserved */
+ uint64_t reserved_151 : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** reserved */
+ uint64_t reserved_137_143 : 7;
+ /** End of file */
+ uint64_t ef : 1;
+ /** Completion/error code */
+ uint64_t compcode : 8;
+#else /* Word 2 - Little Endian */
+ uint64_t compcode : 8;
+ uint64_t ef : 1;
+ uint64_t reserved_137_143 : 7;
+ uint64_t exbits : 7;
+ uint64_t reserved_151 : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_155_158 : 4;
+ uint64_t doneint : 1;
+ uint64_t totalbitsprocessed : 32;
+#endif /* Word 2 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 3 - Big Endian */
+ /** reserved */
+ uint64_t reserved_253_255 : 3;
+ /** Hash length in bytes */
+ uint64_t hshlen : 61;
+#else /* Word 3 - Little Endian */
+ uint64_t hshlen : 61;
+ uint64_t reserved_253_255 : 3;
+#endif /* Word 3 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 4 - Big Endian */
+ /** Double-word 0 of computed hash */
+ uint64_t hash0 : 64;
+#else /* Word 4 - Little Endian */
+ uint64_t hash0 : 64;
+#endif /* Word 4 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 5 - Big Endian */
+ /** Double-word 1 of computed hash */
+ uint64_t hash1 : 64;
+#else /* Word 5 - Little Endian */
+ uint64_t hash1 : 64;
+#endif /* Word 5 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 6 - Big Endian */
+ /** Double-word 2 of computed hash */
+ uint64_t hash2 : 64;
+#else /* Word 6 - Little Endian */
+ uint64_t hash2 : 64;
+#endif /* Word 6 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 7 - Big Endian */
+ /** Double-word 3 of computed hash */
+ uint64_t hash3 : 64;
+#else /* Word 7 - Little Endian */
+ uint64_t hash3 : 64;
+#endif /* Word 7 - End */
+ } /** ZIP Result Structure */s;
+
+ /* struct zip_zres_s_s cn83xx; */
+};
+
+/**
+ * Structure zip_zptr_s
+ *
+ * ZIP Generic Pointer Structure
+ * This structure is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[2];
+ /** ZIP Generic Pointer Structure */
+ struct zip_zptr_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Pointer to Data or scatter-gather list */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** reserved */
+ uint64_t reserved_112_127 : 16;
+ /** Length of Data or scatter-gather list*/
+ uint64_t length : 16;
+ /** reserved */
+ uint64_t reserved_67_95 : 29;
+ /** Full-block write */
+ uint64_t fw : 1;
+ /** No cache allocation */
+ uint64_t nc : 1;
+ /** Data big-endian */
+ uint64_t data_be : 1;
+#else /* Word 1 - Little Endian */
+ uint64_t data_be : 1;
+ uint64_t nc : 1;
+ uint64_t fw : 1;
+ uint64_t reserved_67_95 : 29;
+ uint64_t length : 16;
+ uint64_t reserved_112_127 : 16;
+#endif /* Word 1 - End */
+ } /** ZIP Generic Pointer Structure */s;
+};
+
+/**
+ * Enumeration zip_comp_e
+ *
+ * ZIP Completion Enumeration
+ * Enumerates the values of ZIP_ZRES_S[COMPCODE].
+ */
+#define ZIP_COMP_E_NOTDONE (0)
+#define ZIP_COMP_E_SUCCESS (1)
+#define ZIP_COMP_E_DTRUNC (2)
+#define ZIP_COMP_E_DSTOP (3)
+#define ZIP_COMP_E_ITRUNC (4)
+#define ZIP_COMP_E_RBLOCK (5)
+#define ZIP_COMP_E_NLEN (6)
+#define ZIP_COMP_E_BADCODE (7)
+#define ZIP_COMP_E_BADCODE2 (8)
+#define ZIP_COMP_E_ZERO_LEN (9)
+#define ZIP_COMP_E_PARITY (0xa)
+#define ZIP_COMP_E_FATAL (0xb)
+#define ZIP_COMP_E_TIMEOUT (0xc)
+#define ZIP_COMP_E_INSTR_ERR (0xd)
+#define ZIP_COMP_E_HCTX_ERR (0xe)
+#define ZIP_COMP_E_STOP (3) /* alias of ZIP_COMP_E_DSTOP */
+
+/**
+ * Enumeration zip_op_e
+ *
+ * ZIP Operation Enumeration
+ * Enumerates ZIP_INST_S[OP].
+ */
+#define ZIP_OP_E_DECOMP (0)
+#define ZIP_OP_E_NOCOMP (1)
+#define ZIP_OP_E_COMP (2)
+
+/**
+ * Enumeration zip compression levels
+ *
+ * ZIP Compression Level Enumeration
+ * Enumerates ZIP_INST_S[SS].
+ */
+#define ZIP_COMP_E_LEVEL_MAX (0)
+#define ZIP_COMP_E_LEVEL_MED (1)
+#define ZIP_COMP_E_LEVEL_LOW (2)
+#define ZIP_COMP_E_LEVEL_MIN (3)
+
+#endif /* _RTE_ZIP_REGS_H_ */
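For reference, the PMD below consumes these definitions synchronously: the
result pointer of a submitted instruction targets a zip_zres_s whose COMPCODE
field was cleared beforehand, and completion is detected by polling that
field. A minimal sketch against the definitions above (not part of the patch;
rte_pause() is only a polite spin):

    #include <rte_pause.h>

    /* Poll a result structure until the engine posts a completion code.
     * Assumes COMPCODE was cleared to ZIP_COMP_E_NOTDONE before submit,
     * as the octeontx PMD below does.
     */
    static int
    zip_wait_result(volatile union zip_zres_s *zres)
    {
        while (zres->s.compcode == ZIP_COMP_E_NOTDONE)
            rte_pause();

        if (zres->s.compcode != ZIP_COMP_E_SUCCESS)
            return -(int)zres->s.compcode;

        /* word 0 holds the checksums, word 1 the byte counters */
        return (int)zres->s.totalbyteswritten;
    }
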
diff --git a/drivers/compress/octeontx/meson.build b/drivers/compress/octeontx/meson.build
new file mode 100644
index 00000000..7cd202d0
--- /dev/null
+++ b/drivers/compress/octeontx/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+name = 'octeontx_compress'
+sources = files('otx_zip.c', 'otx_zip_pmd.c')
+allow_experimental_apis = true
+includes += include_directories('include')
+deps += ['mempool_octeontx', 'bus_pci']
+ext_deps += dep
diff --git a/drivers/compress/octeontx/otx_zip.c b/drivers/compress/octeontx/otx_zip.c
new file mode 100644
index 00000000..a9046ff3
--- /dev/null
+++ b/drivers/compress/octeontx/otx_zip.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include "otx_zip.h"
+
+uint64_t
+zip_reg_read64(uint8_t *hw_addr, uint64_t offset)
+{
+ uint8_t *base = hw_addr;
+ return *(volatile uint64_t *)(base + offset);
+}
+
+void
+zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val)
+{
+ uint8_t *base = hw_addr;
+ *(volatile uint64_t *)(base + offset) = val;
+}
+
+static void
+zip_q_enable(struct zipvf_qp *qp)
+{
+ zip_vqx_ena_t que_ena;
+
+ /* Enable the ZIP VFx command queue */
+ que_ena.u = 0ull;
+ que_ena.s.ena = 1;
+
+ zip_reg_write64(qp->vf->vbar0, ZIP_VQ_ENA, que_ena.u);
+ rte_wmb();
+}
+
+/* initialize given qp on zip device */
+int
+zipvf_q_init(struct zipvf_qp *qp)
+{
+ zip_vqx_sbuf_addr_t que_sbuf_addr;
+
+ uint64_t size;
+ void *cmdq_addr;
+ uint64_t iova;
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ struct zip_vf *vf = qp->vf;
+
+ /* allocate and setup instruction queue */
+ size = ZIP_MAX_CMDQ_SIZE;
+ size = ZIP_ALIGN_ROUNDUP(size, ZIP_CMDQ_ALIGN);
+
+ cmdq_addr = rte_zmalloc(qp->name, size, ZIP_CMDQ_ALIGN);
+ if (cmdq_addr == NULL)
+ return -1;
+
+ cmdq->sw_head = (uint64_t *)cmdq_addr;
+ cmdq->va = (uint8_t *)cmdq_addr;
+ iova = rte_mem_virt2iova(cmdq_addr);
+
+ cmdq->iova = iova;
+
+ que_sbuf_addr.u = 0ull;
+ que_sbuf_addr.s.ptr = (cmdq->iova >> 7);
+ zip_reg_write64(vf->vbar0, ZIP_VQ_SBUF_ADDR, que_sbuf_addr.u);
+
+ zip_q_enable(qp);
+
+ memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
+ rte_spinlock_init(&cmdq->qlock);
+
+ return 0;
+}
+
+int
+zipvf_q_term(struct zipvf_qp *qp)
+{
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ zip_vqx_ena_t que_ena;
+ struct zip_vf *vf = qp->vf;
+
+ if (cmdq->va != NULL) {
+ memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
+ rte_free(cmdq->va);
+ }
+
+ /*Disabling the ZIP queue*/
+ que_ena.u = 0ull;
+ zip_reg_write64(vf->vbar0, ZIP_VQ_ENA, que_ena.u);
+
+ return 0;
+}
+
+void
+zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *cmd)
+{
+ zip_quex_doorbell_t dbell;
+ union zip_nptr_s ncp;
+ uint64_t *ncb_ptr;
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ void *reg_base = qp->vf->vbar0;
+
+ /* Take the queue lock */
+ rte_spinlock_lock(&(cmdq->qlock));
+
+ /* Check space availability in zip cmd queue */
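+ /* Worked example: with ZIP_MAX_CHUNK_SIZE = 8192, 128-byte commands
+ * and an 8-byte next-chunk pointer, the queue has ZIP_MAX_NUM_CMDS =
+ * 63 slots (ZIP_MAX_CMDQ_SIZE = 8072). The wrap branch below fires
+ * when sw_head sits on the last slot: 62 * 128 + 128 == 8072 - 8.
+ */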
+ if ((((cmdq->sw_head - (uint64_t *)cmdq->va) * sizeof(uint64_t)) +
+ ZIP_CMD_SIZE) == (ZIP_MAX_CMDQ_SIZE - ZIP_MAX_NCBP_SIZE)) {
+ /* Last command slot before the next-chunk pointer */
+ memcpy((uint8_t *)cmdq->sw_head,
+ (uint8_t *)cmd,
+ sizeof(union zip_inst_s));
+ /* move pointer to next loc in unit of 64-bit word */
+ cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+
+ /* now, point the "Next-Chunk Buffer Ptr" to sw_head */
+ ncb_ptr = cmdq->sw_head;
+ /* Point the head back to the command queue base */
+ cmdq->sw_head = (uint64_t *)cmdq->va;
+
+ ncp.u = 0ull;
+ ncp.s.addr = cmdq->iova;
+ *ncb_ptr = ncp.u;
+ } else {
+ /* Enough space available in the command queue */
+ memcpy((uint8_t *)cmdq->sw_head,
+ (uint8_t *)cmd,
+ sizeof(union zip_inst_s));
+ cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+ }
+
+ rte_wmb();
+
+ /* Ringing ZIP VF doorbell */
+ dbell.u = 0ull;
+ dbell.s.dbell_cnt = 1;
+ zip_reg_write64(reg_base, ZIP_VQ_DOORBELL, dbell.u);
+
+ rte_spinlock_unlock(&(cmdq->qlock));
+}
+
+int
+zipvf_create(struct rte_compressdev *compressdev)
+{
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(compressdev->device);
+ struct zip_vf *zipvf = NULL;
+ char *dev_name = compressdev->data->name;
+ void *vbar0;
+ uint64_t reg;
+
+ if (pdev->mem_resource[0].phys_addr == 0ULL)
+ return -EIO;
+
+ vbar0 = pdev->mem_resource[0].addr;
+ if (!vbar0) {
+ ZIP_PMD_ERR("Failed to map BAR0 of %s", dev_name);
+ return -ENODEV;
+ }
+
+ zipvf = (struct zip_vf *)(compressdev->data->dev_private);
+
+ if (!zipvf)
+ return -ENOMEM;
+
+ zipvf->vbar0 = vbar0;
+ reg = zip_reg_read64(zipvf->vbar0, ZIP_VF_PF_MBOXX(0));
+ /* Store the mbox domain/subdomain id locally in the ZIP VF */
+ zipvf->dom_sdom = reg;
+ zipvf->pdev = pdev;
+ zipvf->max_nb_queue_pairs = ZIP_MAX_VF_QUEUE;
+ return 0;
+}
+
+int
+zipvf_destroy(struct rte_compressdev *compressdev)
+{
+ struct zip_vf *vf = (struct zip_vf *)(compressdev->data->dev_private);
+
+ /* Restore the domain/subdomain id in ZIP_VF_PF_MBOX so the app can rerun */
+ zip_reg_write64(vf->vbar0, ZIP_VF_PF_MBOXX(0), vf->dom_sdom);
+
+ return 0;
+}
diff --git a/drivers/compress/octeontx/otx_zip.h b/drivers/compress/octeontx/otx_zip.h
new file mode 100644
index 00000000..99a38d00
--- /dev/null
+++ b/drivers/compress/octeontx/otx_zip.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _RTE_OCTEONTX_ZIP_VF_H_
+#define _RTE_OCTEONTX_ZIP_VF_H_
+
+#include <unistd.h>
+
+#include <rte_bus_pci.h>
+#include <rte_comp.h>
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+
+#include <zip_regs.h>
+
+extern int octtx_zip_logtype_driver;
+
+/* ZIP VF Control/Status registers (CSRs): */
+/* VF_BAR0: */
+#define ZIP_VQ_ENA (0x10)
+#define ZIP_VQ_SBUF_ADDR (0x20)
+#define ZIP_VF_PF_MBOXX(x) (0x400 | (x)<<3)
+#define ZIP_VQ_DOORBELL (0x1000)
+
+/** Cavium vendor ID */
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+/** PCI device ID of the ZIP VF */
+#define PCI_DEVICE_ID_OCTEONTX_ZIPVF 0xA037
+
+/* maximum number of ZIP VF devices */
+#define ZIP_MAX_VFS 8
+
+/* max size of one chunk */
+#define ZIP_MAX_CHUNK_SIZE 8192
+
+/* each instruction is fixed 128 bytes */
+#define ZIP_CMD_SIZE 128
+
+#define ZIP_CMD_SIZE_WORDS (ZIP_CMD_SIZE >> 3) /* 16 64-bit words */
+
+/* size of next chunk buffer pointer */
+#define ZIP_MAX_NCBP_SIZE 8
+
+/* size of instruction queue in units of instruction size */
+#define ZIP_MAX_NUM_CMDS ((ZIP_MAX_CHUNK_SIZE - ZIP_MAX_NCBP_SIZE) / \
+ ZIP_CMD_SIZE) /* 63 */
+
+/* size of instruction queue in bytes */
+#define ZIP_MAX_CMDQ_SIZE ((ZIP_MAX_NUM_CMDS * ZIP_CMD_SIZE) + \
+ ZIP_MAX_NCBP_SIZE)/* ~8072ull */
+
+#define ZIP_BUF_SIZE 256
+
+#define ZIP_SGPTR_ALIGN 16
+#define ZIP_CMDQ_ALIGN 128
+#define MAX_SG_LEN ((ZIP_BUF_SIZE - ZIP_SGPTR_ALIGN) / sizeof(void *))
+
+/** Maximum number of queue pairs per ZIP VF */
+#define ZIP_MAX_VF_QUEUE 1
+
+#define ZIP_ALIGN_ROUNDUP(x, _align) \
+ ((_align) * (((x) + (_align) - 1) / (_align)))
+
+/** ZIP PMD device name */
+#define COMPRESSDEV_NAME_ZIP_PMD compress_octeonx
+
+#define ZIP_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, \
+ octtx_zip_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+#define ZIP_PMD_INFO(fmt, args...) \
+ ZIP_PMD_LOG(INFO, fmt, ## args)
+#define ZIP_PMD_ERR(fmt, args...) \
+ ZIP_PMD_LOG(ERR, fmt, ## args)
+
+/* resources required to process stream */
+enum {
+ RES_BUF = 0,
+ CMD_BUF,
+ HASH_CTX_BUF,
+ DECOMP_CTX_BUF,
+ IN_DATA_BUF,
+ OUT_DATA_BUF,
+ HISTORY_DATA_BUF,
+ MAX_BUFS_PER_STREAM
+} NUM_BUFS_PER_STREAM;
+
+struct zip_stream;
+struct zipvf_qp;
+
+/* Algorithm handler function prototype */
+typedef int (*comp_func_t)(struct rte_comp_op *op,
+ struct zipvf_qp *qp, struct zip_stream *zstrm);
+
+/**
+ * ZIP private stream structure
+ */
+struct zip_stream {
+ union zip_inst_s *inst;
+ /* zip instruction pointer */
+ comp_func_t func;
+ /* function to process comp operation */
+ void *bufs[MAX_BUFS_PER_STREAM];
+} __rte_cache_aligned;
+
+
+/**
+ * ZIP instruction Queue
+ */
+struct zipvf_cmdq {
+ rte_spinlock_t qlock;
+ /* queue lock */
+ uint64_t *sw_head;
+ /* pointer to the queue head, in units of 8-byte words */
+ uint8_t *va;
+ /* pointer to instruction queue virtual address */
+ rte_iova_t iova;
+ /* IOVA address of the command queue base */
+};
+
+/**
+ * ZIP device queue structure
+ */
+struct zipvf_qp {
+ struct zipvf_cmdq cmdq;
+ /* Hardware instruction queue structure */
+ struct rte_ring *processed_pkts;
+ /* Ring for placing processed packets */
+ struct rte_compressdev_stats qp_stats;
+ /* Queue pair statistics */
+ uint16_t id;
+ /* Queue Pair Identifier */
+ const char *name;
+ /* Unique Queue Pair Name */
+ struct zip_vf *vf;
+ /* pointer to the device this queue belongs to */
+} __rte_cache_aligned;
+
+/**
+ * ZIP VF device structure.
+ */
+struct zip_vf {
+ int vfid;
+ /* vf index */
+ struct rte_pci_device *pdev;
+ /* pci device */
+ void *vbar0;
+ /* CSR base address of the VF (BAR0) */
+ uint64_t dom_sdom;
+ /* mbox domain and subdomain id, saved for app rerun */
+ uint32_t max_nb_queue_pairs;
+ /* maximum number of queue pairs */
+ struct rte_mempool *zip_mp;
+ /* mempool for per-stream resources */
+} __rte_cache_aligned;
+
+
+static inline void
+zipvf_prepare_in_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+ uint32_t offset, inlen;
+ struct rte_mbuf *m_src;
+ union zip_inst_s *inst = zstrm->inst;
+
+ inlen = op->src.length;
+ offset = op->src.offset;
+ m_src = op->m_src;
+
+ /* Prepare direct input data pointer */
+ inst->s.dg = 0;
+ inst->s.inp_ptr_addr.s.addr =
+ rte_pktmbuf_iova_offset(m_src, offset);
+ inst->s.inp_ptr_ctl.s.length = inlen;
+}
+
+static inline void
+zipvf_prepare_out_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+ uint32_t offset;
+ struct rte_mbuf *m_dst;
+ union zip_inst_s *inst = zstrm->inst;
+
+ offset = op->dst.offset;
+ m_dst = op->m_dst;
+
+ /* Prepare direct output data pointer */
+ inst->s.ds = 0;
+ inst->s.out_ptr_addr.s.addr =
+ rte_pktmbuf_iova_offset(m_dst, offset);
+ inst->s.totaloutputlength = rte_pktmbuf_pkt_len(m_dst) -
+ op->dst.offset;
+ inst->s.out_ptr_ctl.s.length = inst->s.totaloutputlength;
+}
+
+static inline void
+zipvf_prepare_cmd_stateless(struct rte_comp_op *op, struct zip_stream *zstrm)
+{
+ union zip_inst_s *inst = zstrm->inst;
+
+ /* stateless op: always mark end of input data (EF) */
+ inst->s.ef = 1;
+
+ if (inst->s.op == ZIP_OP_E_DECOMP)
+ inst->s.sf = 1;
+ else
+ inst->s.sf = 0;
+
+ /* Set input checksum */
+ inst->s.adlercrc32 = op->input_chksum;
+
+ /* Prepare input and output buffer pointers */
+ zipvf_prepare_in_buf(zstrm, op);
+ zipvf_prepare_out_buf(zstrm, op);
+}
+
+#ifdef ZIP_DBG
+static inline void
+zip_dump_instruction(void *inst)
+{
+ union zip_inst_s *cmd83 = (union zip_inst_s *)inst;
+ printf("####### START ########\n");
+ printf("doneint:%d totaloutputlength:%d\n", cmd83->s.doneint,
+ cmd83->s.totaloutputlength);
+ printf("exnum:%d iv:%d exbits:%d hmif:%d halg:%d\n", cmd83->s.exn,
+ cmd83->s.iv, cmd83->s.exbits, cmd83->s.hmif, cmd83->s.halg);
+ printf("flush:%d speed:%d cc:%d\n", cmd83->s.sf,
+ cmd83->s.ss, cmd83->s.cc);
+ printf("eof:%d bof:%d op:%d dscatter:%d dgather:%d hgather:%d\n",
+ cmd83->s.ef, cmd83->s.bf, cmd83->s.op, cmd83->s.ds,
+ cmd83->s.dg, cmd83->s.hg);
+ printf("historylength:%d adler32:%d\n", cmd83->s.historylength,
+ cmd83->s.adlercrc32);
+ printf("ctx_ptr.addr:0x%"PRIx64"\n", cmd83->s.ctx_ptr_addr.s.addr);
+ printf("ctx_ptr.len:%d\n", cmd83->s.ctx_ptr_ctl.s.length);
+ printf("history_ptr.addr:0x%"PRIx64"\n", cmd83->s.his_ptr_addr.s.addr);
+ printf("history_ptr.len:%d\n", cmd83->s.his_ptr_ctl.s.length);
+ printf("inp_ptr.addr:0x%"PRIx64"\n", cmd83->s.inp_ptr_addr.s.addr);
+ printf("inp_ptr.len:%d\n", cmd83->s.inp_ptr_ctl.s.length);
+ printf("out_ptr.addr:0x%"PRIx64"\n", cmd83->s.out_ptr_addr.s.addr);
+ printf("out_ptr.len:%d\n", cmd83->s.out_ptr_ctl.s.length);
+ printf("result_ptr.len:%d\n", cmd83->s.res_ptr_ctl.s.length);
+ printf("####### END ########\n");
+}
+#endif
+
+int
+zipvf_create(struct rte_compressdev *compressdev);
+
+int
+zipvf_destroy(struct rte_compressdev *compressdev);
+
+int
+zipvf_q_init(struct zipvf_qp *qp);
+
+int
+zipvf_q_term(struct zipvf_qp *qp);
+
+void
+zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *zcmd);
+
+int
+zip_process_op(struct rte_comp_op *op,
+ struct zipvf_qp *qp,
+ struct zip_stream *zstrm);
+
+uint64_t
+zip_reg_read64(uint8_t *hw_addr, uint64_t offset);
+
+void
+zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val);
+
+#endif /* _RTE_OCTEONTX_ZIP_VF_H_ */
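
The zipvf_* API above is reached through the generic compressdev layer. A
minimal, untested usage sketch from the application side (dev_id assumed to
be a probed compress_octeonx device, op_pool created with
rte_comp_op_pool_create(); error paths and mbuf setup condensed):

    #include <rte_comp.h>
    #include <rte_compressdev.h>
    #include <rte_lcore.h>
    #include <rte_mbuf.h>

    static int
    compress_one_buffer(uint8_t dev_id, struct rte_mempool *op_pool,
                        struct rte_mbuf *src, struct rte_mbuf *dst)
    {
        struct rte_compressdev_config cfg = {
            .socket_id = rte_socket_id(),
            .nb_queue_pairs = 1,        /* ZIP_MAX_VF_QUEUE */
            .max_nb_priv_xforms = 1,
            .max_nb_streams = 0,        /* PMD is stateless-only */
        };
        struct rte_comp_xform xform = {
            .type = RTE_COMP_COMPRESS,
            .compress = {
                .algo = RTE_COMP_ALGO_DEFLATE,
                .deflate.huffman = RTE_COMP_HUFFMAN_FIXED,
                .level = RTE_COMP_LEVEL_PMD_DEFAULT,
                .chksum = RTE_COMP_CHECKSUM_NONE,
                .window_size = 14,      /* max the PMD advertises */
            },
        };
        void *priv_xform;
        struct rte_comp_op *op;

        if (rte_compressdev_configure(dev_id, &cfg) < 0 ||
            rte_compressdev_queue_pair_setup(dev_id, 0, 64,
                    rte_socket_id()) < 0 ||
            rte_compressdev_start(dev_id) < 0 ||
            rte_compressdev_private_xform_create(dev_id, &xform,
                    &priv_xform) < 0)
            return -1;

        op = rte_comp_op_alloc(op_pool);
        if (op == NULL)
            return -1;
        op->op_type = RTE_COMP_OP_STATELESS;
        op->flush_flag = RTE_COMP_FLUSH_FINAL;
        op->private_xform = priv_xform;
        op->m_src = src;
        op->m_dst = dst;
        op->src.offset = 0;
        op->src.length = rte_pktmbuf_pkt_len(src);
        op->dst.offset = 0;

        if (rte_compressdev_enqueue_burst(dev_id, 0, &op, 1) < 1)
            return -1;
        while (rte_compressdev_dequeue_burst(dev_id, 0, &op, 1) == 0)
            ;   /* op was processed synchronously at enqueue */

        return op->status == RTE_COMP_OP_STATUS_SUCCESS ? 0 : -1;
    }
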
diff --git a/drivers/compress/octeontx/otx_zip_pmd.c b/drivers/compress/octeontx/otx_zip_pmd.c
new file mode 100644
index 00000000..9d13f933
--- /dev/null
+++ b/drivers/compress/octeontx/otx_zip_pmd.c
@@ -0,0 +1,658 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cpuflags.h>
+#include <rte_malloc.h>
+
+#include "otx_zip.h"
+
+static const struct rte_compressdev_capabilities
+ octtx_zip_pmd_capabilities[] = {
+ { .algo = RTE_COMP_ALGO_DEFLATE,
+ /* Deflate */
+ .comp_feature_flags = RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC,
+ /* Non sharable Priv XFORM and Stateless */
+ .window_size = {
+ .min = 1,
+ .max = 14,
+ .increment = 1
+ /* size supported 2^1 to 2^14 */
+ },
+ },
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+};
+
+/*
+ * Reset the stream to its default state for the next stateless operation
+ */
+static inline void
+reset_stream(struct zip_stream *z_stream)
+{
+ union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);
+
+ inst->s.bf = 1;
+ inst->s.ef = 0;
+}
+
+int
+zip_process_op(struct rte_comp_op *op,
+ struct zipvf_qp *qp,
+ struct zip_stream *zstrm)
+{
+ union zip_inst_s *inst = zstrm->inst;
+ volatile union zip_zres_s *zresult = NULL;
+
+
+ if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
+ (op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
+ (op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZIP_PMD_ERR("Segmented packet is not supported\n");
+ return 0;
+ }
+
+ zipvf_prepare_cmd_stateless(op, zstrm);
+
+ zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
+ zresult->s.compcode = 0;
+
+#ifdef ZIP_DBG
+ zip_dump_instruction(inst);
+#endif
+
+ /* Submit zip command */
+ zipvf_push_command(qp, (void *)inst);
+
+ /* Busy-wait for the engine to post the result (sync mode) */
+ do {
+ } while (!zresult->s.compcode);
+
+ if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ } else {
+ /* FATAL error cannot do anything */
+ ZIP_PMD_ERR("operation failed with error code:%d\n",
+ zresult->s.compcode);
+ if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ else
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ }
+
+ ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten);
+
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed = zresult->s.totalbytesread;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced = zresult->s.totalbyteswritten;
+ break;
+ default:
+ ZIP_PMD_ERR("stats not updated for status:%d\n",
+ op->status);
+ break;
+ }
+ /* zstream is reset irrespective of result */
+ reset_stream(zstrm);
+
+ zresult->s.compcode = ZIP_COMP_E_NOTDONE;
+ return 0;
+}
+
+/** Parse xform parameters and setup a stream */
+static int
+zip_set_stream_parameters(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ struct zip_stream *z_stream)
+{
+ int ret;
+ union zip_inst_s *inst;
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+ void *res;
+
+ /* Allocate resources required by a stream */
+ ret = rte_mempool_get_bulk(vf->zip_mp,
+ z_stream->bufs, MAX_BUFS_PER_STREAM);
+ if (ret < 0)
+ return -1;
+
+ /* get one command buffer from pool and set up */
+ inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
+ res = z_stream->bufs[RES_BUF];
+
+ memset(inst->u, 0, sizeof(inst->u));
+
+ /* set bf for only first ops of stream */
+ inst->s.bf = 1;
+
+ if (xform->type == RTE_COMP_COMPRESS) {
+ inst->s.op = ZIP_OP_E_COMP;
+
+ switch (xform->compress.deflate.huffman) {
+ case RTE_COMP_HUFFMAN_DEFAULT:
+ inst->s.cc = ZIP_CC_DEFAULT;
+ break;
+ case RTE_COMP_HUFFMAN_FIXED:
+ inst->s.cc = ZIP_CC_FIXED_HUFF;
+ break;
+ case RTE_COMP_HUFFMAN_DYNAMIC:
+ inst->s.cc = ZIP_CC_DYN_HUFF;
+ break;
+ default:
+ ret = -1;
+ goto err;
+ }
+
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_MIN:
+ inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
+ break;
+ case RTE_COMP_LEVEL_MAX:
+ inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
+ break;
+ case RTE_COMP_LEVEL_NONE:
+ ZIP_PMD_ERR("Compression level not supported");
+ ret = -1;
+ goto err;
+ default:
+ /* for any value between min and max, choose
+ * PMD default.
+ */
+ inst->s.ss = ZIP_COMP_E_LEVEL_MED; /** PMD default **/
+ break;
+ }
+ } else if (xform->type == RTE_COMP_DECOMPRESS) {
+ inst->s.op = ZIP_OP_E_DECOMP;
+ /* From the HRM:
+ * For DEFLATE decompression, [CC] must be 0x0.
+ * For decompression, [SS] must be 0x0
+ */
+ inst->s.cc = 0;
+ /* Speed bit should not be set for decompression */
+ inst->s.ss = 0;
+ /* decompression context is supported only for STATEFUL
+ * operations. Currently we support STATELESS ONLY so
+ * skip setting of ctx pointer
+ */
+
+ } else {
+ ZIP_PMD_ERR("\nxform type not supported");
+ ret = -1;
+ goto err;
+ }
+
+ inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
+ inst->s.res_ptr_ctl.s.length = 0;
+
+ z_stream->inst = inst;
+ z_stream->func = zip_process_op;
+
+ return 0;
+
+err:
+ rte_mempool_put_bulk(vf->zip_mp,
+ (void *)&(z_stream->bufs[0]),
+ MAX_BUFS_PER_STREAM);
+
+ return ret;
+}
+
+/** Configure device */
+static int
+zip_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ int nb_streams;
+ char res_pool[RTE_MEMZONE_NAMESIZE];
+ struct zip_vf *vf;
+ struct rte_mempool *zip_buf_mp;
+
+ if (!config || !dev)
+ return -EIO;
+
+ vf = (struct zip_vf *)(dev->data->dev_private);
+
+ /* create a pool with the maximum number of resources
+ * required by streams
+ */
+
+ /* use common pool for non-shareable priv_xform and stream */
+ nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;
+
+ snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
+ dev->data->dev_id);
+
+ /** TBD Should we use the per core object cache for stream resources */
+ zip_buf_mp = rte_mempool_create(
+ res_pool,
+ nb_streams * MAX_BUFS_PER_STREAM,
+ ZIP_BUF_SIZE,
+ 0,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ SOCKET_ID_ANY,
+ 0);
+
+ if (zip_buf_mp == NULL) {
+ ZIP_PMD_ERR(
+ "Failed to create buf mempool octtx_zip_res_pool%u",
+ dev->data->dev_id);
+ return -1;
+ }
+
+ vf->zip_mp = zip_buf_mp;
+
+ return 0;
+}
+
+/** Start device */
+static int
+zip_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+
+}
+
+/** Close device */
+static int
+zip_pmd_close(struct rte_compressdev *dev)
+{
+ if (dev == NULL)
+ return -1;
+
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+ rte_mempool_free(vf->zip_mp);
+
+ return 0;
+}
+
+/** Get device statistics */
+static void
+zip_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zip_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Get device info */
+static void
+zip_pmd_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *dev_info)
+{
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_name = dev->device->driver->name;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = octtx_zip_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
+ }
+}
+
+/** Release queue pair */
+static int
+zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ zipvf_q_term(qp);
+
+ if (qp->processed_pkts)
+ rte_ring_free(qp->processed_pkts);
+
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ZIP_PMD_INFO("Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+
+ ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_EXACT_SZ);
+}
+
+/** Setup a queue pair */
+static int
+zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct zipvf_qp *qp = NULL;
+ struct zip_vf *vf;
+ char *name;
+ int ret;
+
+ if (!dev)
+ return -1;
+
+ vf = (struct zip_vf *) (dev->data->dev_private);
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
+ return 0;
+ }
+
+ name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
+ if (name == NULL)
+ return (-ENOMEM);
+ snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+ "zip_pmd_%u_qp_%u",
+ dev->data->dev_id, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket(name, sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->name = name;
+
+ /* Create completion queue up to max_inflight_ops */
+ qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ qp->id = qp_id;
+ qp->vf = vf;
+
+ ret = zipvf_q_init(qp);
+ if (ret < 0)
+ goto qp_setup_cleanup;
+
+ dev->data->queue_pairs[qp_id] = qp;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ if (qp->processed_pkts)
+ rte_ring_free(qp->processed_pkts);
+ rte_free(qp);
+ rte_free(name); /* qp->name is allocated separately */
+ return -1;
+}
+
+static int
+zip_pmd_stream_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform, void **stream)
+{
+ int ret;
+ struct zip_stream *strm = NULL;
+
+ strm = rte_malloc(NULL,
+ sizeof(struct zip_stream), 0);
+
+ if (strm == NULL)
+ return (-ENOMEM);
+
+ ret = zip_set_stream_parameters(dev, xform, strm);
+ if (ret < 0) {
+ ZIP_PMD_ERR("failed configure xform parameters");
+ rte_free(strm);
+ return ret;
+ }
+ *stream = strm;
+ return 0;
+}
+
+static int
+zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
+{
+ struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
+ struct zip_stream *z_stream;
+
+ if (stream == NULL)
+ return 0;
+
+ z_stream = (struct zip_stream *)stream;
+
+ /* Free resources back to pool */
+ rte_mempool_put_bulk(vf->zip_mp,
+ (void *)&(z_stream->bufs[0]),
+ MAX_BUFS_PER_STREAM);
+
+ /* Zero out the whole structure */
+ memset(stream, 0, sizeof(struct zip_stream));
+ rte_free(stream);
+
+ return 0;
+}
+
+
+static uint16_t
+zip_pmd_enqueue_burst_sync(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zipvf_qp *qp = queue_pair;
+ struct rte_comp_op *op;
+ struct zip_stream *zstrm;
+ int i, ret = 0;
+ uint16_t enqd = 0;
+
+ for (i = 0; i < nb_ops; i++) {
+ op = ops[i];
+
+ if (op->op_type == RTE_COMP_OP_STATEFUL) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ } else {
+ /* process stateless ops */
+ zstrm = (struct zip_stream *)op->private_xform;
+ if (unlikely(zstrm == NULL))
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ else
+ ret = zstrm->func(op, qp, zstrm);
+ }
+
+ /* Whatever the outcome of the op, place it on the completion
+ * queue along with its status
+ */
+ if (!ret)
+ ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
+
+ if (unlikely(ret < 0)) {
+ /* increment count if op enqueue failed */
+ qp->qp_stats.enqueue_err_count++;
+ } else {
+ qp->qp_stats.enqueued_count++;
+ enqd++;
+ }
+ }
+ return enqd;
+}
+
+static uint16_t
+zip_pmd_dequeue_burst_sync(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zipvf_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
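+
+/* Datapath note: ops are executed synchronously inside the enqueue callback
+ * (zstrm->func is zip_process_op, which busy-waits on the result buffer), so
+ * an op already carries its final status when it reaches processed_pkts and
+ * dequeue only drains the ring.
+ */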
+
+struct rte_compressdev_ops octtx_zip_pmd_ops = {
+ .dev_configure = zip_pmd_config,
+ .dev_start = zip_pmd_start,
+ .dev_stop = zip_pmd_stop,
+ .dev_close = zip_pmd_close,
+
+ .stats_get = zip_pmd_stats_get,
+ .stats_reset = zip_pmd_stats_reset,
+
+ .dev_infos_get = zip_pmd_info_get,
+
+ .queue_pair_setup = zip_pmd_qp_setup,
+ .queue_pair_release = zip_pmd_qp_release,
+
+ .private_xform_create = zip_pmd_stream_create,
+ .private_xform_free = zip_pmd_stream_free,
+ .stream_create = NULL,
+ .stream_free = NULL
+};
+
+static int
+zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ int ret = 0;
+ char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ struct rte_compressdev *compressdev;
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id(),
+ };
+
+ ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
+ (unsigned int)pci_dev->id.vendor_id,
+ (unsigned int)pci_dev->id.device_id);
+
+ rte_pci_device_name(&pci_dev->addr, compressdev_name,
+ sizeof(compressdev_name));
+
+ compressdev = rte_compressdev_pmd_create(compressdev_name,
+ &pci_dev->device, sizeof(struct zip_vf), &init_params);
+ if (compressdev == NULL) {
+ ZIP_PMD_ERR("driver %s: create failed", init_params.name);
+ return -ENODEV;
+ }
+
+ /*
+ * create only if proc_type is primary.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* create vf dev with given pmd dev id */
+ ret = zipvf_create(compressdev);
+ if (ret < 0) {
+ ZIP_PMD_ERR("Device creation failed");
+ rte_compressdev_pmd_destroy(compressdev);
+ return ret;
+ }
+ }
+
+ compressdev->dev_ops = &octtx_zip_pmd_ops;
+ /* register rx/tx burst functions for data path */
+ compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
+ compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
+ compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+ return ret;
+}
+
+static int
+zip_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_compressdev *compressdev;
+ char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL) {
+ ZIP_PMD_ERR(" Invalid PCI Device\n");
+ return -EINVAL;
+ }
+ rte_pci_device_name(&pci_dev->addr, compressdev_name,
+ sizeof(compressdev_name));
+
+ compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (zipvf_destroy(compressdev) < 0)
+ return -ENODEV;
+ }
+ return rte_compressdev_pmd_destroy(compressdev);
+}
+
+static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_ZIPVF),
+ },
+ {
+ .device_id = 0
+ },
+};
+
+/**
+ * Structure that represents a PCI driver
+ */
+static struct rte_pci_driver octtx_zip_pmd = {
+ .id_table = pci_id_octtx_zipvf_table,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = zip_pci_probe,
+ .remove = zip_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
+
+RTE_INIT(octtx_zip_init_log);
+
+static void
+octtx_zip_init_log(void)
+{
+ octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
+ if (octtx_zip_logtype_driver >= 0)
+ rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
+}
diff --git a/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map b/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/drivers/compress/qat/meson.build b/drivers/compress/qat/meson.build
new file mode 100644
index 00000000..9d15076d
--- /dev/null
+++ b/drivers/compress/qat/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+
+# Add our source files to the list
+allow_experimental_apis = true
+qat_sources += files('qat_comp_pmd.c',
+ 'qat_comp.c')
+qat_includes += include_directories('.')
+qat_deps += 'compressdev'
+qat_ext_deps += dep
+
+# build the whole driver
+sources += qat_sources
+cflags += qat_cflags
+deps += qat_deps
+ext_deps += qat_ext_deps
+includes += qat_includes
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
new file mode 100644
index 00000000..38c8a5b8
--- /dev/null
+++ b/drivers/compress/qat/qat_comp.c
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_hexdump.h>
+#include <rte_comp.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+#include <rte_memcpy.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "qat_logs.h"
+#include "qat_comp.h"
+#include "qat_comp_pmd.h"
+
+
+int
+qat_comp_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie,
+ enum qat_device_gen qat_dev_gen __rte_unused)
+{
+ struct rte_comp_op *op = in_op;
+ struct qat_comp_op_cookie *cookie =
+ (struct qat_comp_op_cookie *)op_cookie;
+ struct qat_comp_xform *qat_xform = op->private_xform;
+ const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
+ struct icp_qat_fw_comp_req *comp_req =
+ (struct icp_qat_fw_comp_req *)out_msg;
+
+ if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
+ "operation requests, op (%p) is not a "
+ "stateless operation.", op);
+ return -EINVAL;
+ }
+
+ rte_mov128(out_msg, tmpl);
+ comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+
+ /* common for sgl and flat buffers */
+ comp_req->comp_pars.comp_len = op->src.length;
+ comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
+ op->dst.offset;
+
+ if (op->m_src->next != NULL || op->m_dst->next != NULL) {
+ /* sgl */
+ int ret = 0;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+
+ ret = qat_sgl_fill_array(op->m_src,
+ op->src.offset,
+ &cookie->qat_sgl_src,
+ op->src.length,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
+ return ret;
+ }
+
+ ret = qat_sgl_fill_array(op->m_dst,
+ op->dst.offset,
+ &cookie->qat_sgl_dst,
+ comp_req->comp_pars.out_buffer_sz,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
+ return ret;
+ }
+
+ comp_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ comp_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ comp_req->comn_mid.src_length = 0;
+ comp_req->comn_mid.dst_length = 0;
+
+ } else {
+ /* flat aka linear buffer */
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_FLAT);
+ comp_req->comn_mid.src_length = op->src.length;
+ comp_req->comn_mid.dst_length =
+ comp_req->comp_pars.out_buffer_sz;
+
+ comp_req->comn_mid.src_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
+ comp_req->comn_mid.dest_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
+ sizeof(struct icp_qat_fw_comp_req));
+#endif
+ return 0;
+}
+
+int
+qat_comp_process_response(void **op, uint8_t *resp)
+{
+ struct icp_qat_fw_comp_resp *resp_msg =
+ (struct icp_qat_fw_comp_resp *)resp;
+ struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+ struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
+ (rx_op->private_xform);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comp_resp));
+#endif
+
+ if (likely(qat_xform->qat_comp_request_type
+ != QAT_COMP_REQUEST_DECOMPRESS)) {
+ if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
+ resp_msg->comn_resp.hdr_flags)
+ == ICP_QAT_FW_COMP_NO_CNV)) {
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
+ *op = (void *)rx_op;
+ QAT_DP_LOG(ERR, "QAT has wrong firmware");
+ return 0;
+ }
+ }
+
+ if ((ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(resp_msg->comn_resp.comn_status)
+ | ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(
+ resp_msg->comn_resp.comn_status)) !=
+ ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
+
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ rx_op->debug_status =
+ *((uint16_t *)(&resp_msg->comn_resp.comn_error));
+ } else {
+ struct icp_qat_fw_resp_comp_pars *comp_resp =
+ (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;
+
+ rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ rx_op->consumed = comp_resp->input_byte_counter;
+ rx_op->produced = comp_resp->output_byte_counter;
+
+ if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
+ if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
+ rx_op->output_chksum = comp_resp->curr_crc32;
+ else if (qat_xform->checksum_type ==
+ RTE_COMP_CHECKSUM_ADLER32)
+ rx_op->output_chksum = comp_resp->curr_adler_32;
+ else
+ rx_op->output_chksum = comp_resp->curr_chksum;
+ }
+ }
+ *op = (void *)rx_op;
+
+ return 0;
+}
+
+unsigned int
+qat_comp_xform_size(void)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
+}
+
+static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_comp_request_type request)
+{
+ if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ else if (request == QAT_COMP_REQUEST_DECOMPRESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+ header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
+ QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
+}
+
+static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
+ const struct rte_memzone *interm_buff_mz __rte_unused,
+ const struct rte_comp_xform *xform)
+{
+ struct icp_qat_fw_comp_req *comp_req;
+ int comp_level, algo;
+ uint32_t req_par_flags;
+ int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;
+
+ if (unlikely(qat_xform == NULL)) {
+ QAT_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
+ direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
+ req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
+ ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
+
+ } else {
+ if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
+ else if (xform->compress.level == 1)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
+ else if (xform->compress.level == 2)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
+ else if (xform->compress.level == 3)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
+ else if (xform->compress.level >= 4 &&
+ xform->compress.level <= 9)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
+ else {
+ QAT_LOG(ERR, "compression level not supported");
+ return -EINVAL;
+ }
+ req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
+ ICP_QAT_FW_COMP_CNV_RECOVERY);
+ }
+
+ switch (xform->compress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
+ algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
+ break;
+ case RTE_COMP_ALGO_LZS:
+ default:
+ /* RTE_COMP_NULL */
+ QAT_LOG(ERR, "compression algorithm not supported");
+ return -EINVAL;
+ }
+
+ comp_req = &qat_xform->qat_comp_req_tmpl;
+
+ /* Initialize header */
+ qat_comp_create_req_hdr(&comp_req->comn_hdr,
+ qat_xform->qat_comp_request_type);
+
+ comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_STATELESS_SESSION,
+ ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+
+ comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
+ direction,
+ /* The only valid mode in CPM 1.6 */
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
+ /* Translate level to depth */
+ comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ comp_req->comp_pars.initial_adler = 1;
+ comp_req->comp_pars.initial_crc32 = 0;
+ comp_req->comp_pars.req_par_flags = req_par_flags;
+
+
+ if (qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_COMP);
+ } else if (qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {
+
+ QAT_LOG(ERR, "Dynamic huffman encoding not supported");
+ return -EINVAL;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
+ sizeof(struct icp_qat_fw_comp_req));
+#endif
+ return 0;
+}
+
+/**
+ * Create driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param xform
+ * xform data from application
+ * @param private_xform
+ * ptr where handle of pmd's private_xform data should be stored
+ * @return
+ * - if successful returns 0
+ * and valid private_xform handle
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if comp device does not support the comp transform.
+ * - Returns -ENOMEM if the private_xform could not be allocated.
+ */
+int
+qat_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ struct qat_comp_dev_private *qat = dev->data->dev_private;
+
+ if (unlikely(private_xform == NULL)) {
+ QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
+ return -EINVAL;
+ }
+ if (unlikely(qat->xformpool == NULL)) {
+ QAT_LOG(ERR, "QAT device has no private_xform mempool");
+ return -ENOMEM;
+ }
+ if (rte_mempool_get(qat->xformpool, private_xform)) {
+ QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
+ return -ENOMEM;
+ }
+
+ struct qat_comp_xform *qat_xform =
+ (struct qat_comp_xform *)*private_xform;
+
+ if (xform->type == RTE_COMP_COMPRESS) {
+ if (xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DYNAMIC) {
+ QAT_LOG(ERR,
+ "QAT device doesn't support dynamic compression");
+ /* return the xform object to its pool on error */
+ rte_mempool_put(qat->xformpool, *private_xform);
+ return -ENOTSUP;
+ }
+
+ if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
+ ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
+ && qat->interm_buff_mz == NULL))
+ qat_xform->qat_comp_request_type =
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS;
+
+ } else {
+ qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
+ }
+
+ qat_xform->checksum_type = xform->compress.chksum;
+
+ if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
+ QAT_LOG(ERR, "QAT: Problem with setting compression");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * Free driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param private_xform
+ * handle of pmd's private_xform data
+ * @return
+ * - 0 if successful
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ */
+int
+qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
+ void *private_xform)
+{
+ struct qat_comp_xform *qat_xform =
+ (struct qat_comp_xform *)private_xform;
+
+ if (qat_xform) {
+ memset(qat_xform, 0, qat_comp_xform_size());
+ struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);
+
+ rte_mempool_put(mp, qat_xform);
+ return 0;
+ }
+ return -EINVAL;
+}
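
For completeness, the application-side counterpart of the two functions above
is a plain compressdev call; a short, untested sketch (dev_id assumed to be a
probed QAT compression device):

    #include <rte_comp.h>
    #include <rte_compressdev.h>

    /* Request a fixed-Huffman deflate compressor with CRC32 checksums.
     * Dynamic Huffman would be rejected with -ENOTSUP by the code above;
     * level 1 maps to HW search depth 1 in qat_comp_create_templates().
     */
    static void *
    make_qat_deflate_xform(uint8_t dev_id)
    {
        struct rte_comp_xform xform = {
            .type = RTE_COMP_COMPRESS,
            .compress = {
                .algo = RTE_COMP_ALGO_DEFLATE,
                .deflate.huffman = RTE_COMP_HUFFMAN_FIXED,
                .level = 1,
                .chksum = RTE_COMP_CHECKSUM_CRC32,
                .window_size = 15,  /* the only size advertised */
            },
        };
        void *priv_xform = NULL;

        if (rte_compressdev_private_xform_create(dev_id, &xform,
                &priv_xform) < 0)
            return NULL;
        /* shareable across ops/queues (RTE_COMP_FF_SHAREABLE_PRIV_XFORM) */
        return priv_xform;
    }
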
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
new file mode 100644
index 00000000..8d315efb
--- /dev/null
+++ b/drivers/compress/qat/qat_comp.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_COMP_H_
+#define _QAT_COMP_H_
+
+#ifdef RTE_LIBRTE_COMPRESSDEV
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "qat_common.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_fw_la.h"
+
+#define ERR_CODE_QAT_COMP_WRONG_FW -99
+
+enum qat_comp_request_type {
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
+ QAT_COMP_REQUEST_DECOMPRESS,
+ REQ_COMP_END
+};
+
+struct qat_comp_sgl {
+ qat_sgl_hdr; /* macro expanding to the common SGL header fields, see qat_common.h */
+ struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_comp_op_cookie {
+ struct qat_comp_sgl qat_sgl_src;
+ struct qat_comp_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+struct qat_comp_xform {
+ struct icp_qat_fw_comp_req qat_comp_req_tmpl;
+ enum qat_comp_request_type qat_comp_request_type;
+ enum rte_comp_checksum_type checksum_type;
+};
+
+int
+qat_comp_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+ enum qat_device_gen qat_dev_gen __rte_unused);
+
+int
+qat_comp_process_response(void **op, uint8_t *resp);
+
+
+int
+qat_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform);
+
+int
+qat_comp_private_xform_free(struct rte_compressdev *dev, void *private_xform);
+
+unsigned int
+qat_comp_xform_size(void);
+
+#endif /* RTE_LIBRTE_COMPRESSDEV */
+#endif /* _QAT_COMP_H_ */
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
new file mode 100644
index 00000000..b89975fc
--- /dev/null
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include "qat_comp.h"
+#include "qat_comp_pmd.h"
+
+static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
+ {/* COMPRESSION - deflate */
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
+ RTE_COMP_FF_CRC32_CHECKSUM |
+ RTE_COMP_FF_ADLER32_CHECKSUM |
+ RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+ .window_size = {.min = 15, .max = 15, .increment = 0} },
+ {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
+
+static void
+qat_comp_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ struct qat_common_stats qat_stats = {0};
+ struct qat_comp_dev_private *qat_priv;
+
+ if (stats == NULL || dev == NULL) {
+ QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
+ stats->enqueued_count = qat_stats.enqueued_count;
+ stats->dequeued_count = qat_stats.dequeued_count;
+ stats->enqueue_err_count = qat_stats.enqueue_err_count;
+ stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void
+qat_comp_stats_reset(struct rte_compressdev *dev)
+{
+ struct qat_comp_dev_private *qat_priv;
+
+ if (dev == NULL) {
+ QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
+
+}
+
+static int
+qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
+{
+ struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+
+ QAT_LOG(DEBUG, "Release comp qp %u on device %d",
+ queue_pair_id, dev->data->dev_id);
+
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
+ = NULL;
+
+ return qat_qp_release((struct qat_qp **)
+ &(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int
+qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct qat_qp *qp;
+ int ret = 0;
+ uint32_t i;
+ struct qat_qp_config qat_qp_conf;
+
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *comp_hw_qps =
+ qat_gen_config[qat_private->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_COMPRESSION];
+ const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (*qp_addr != NULL) {
+ ret = qat_comp_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+ if (qp_id >= qat_qps_per_service(comp_hw_qps,
+ QAT_SERVICE_COMPRESSION)) {
+ QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+ return -EINVAL;
+ }
+
+ qat_qp_conf.hw = qp_hw_data;
+ qat_qp_conf.build_request = qat_comp_build_request;
+ qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
+ qat_qp_conf.nb_descriptors = max_inflight_ops;
+ qat_qp_conf.socket_id = socket_id;
+ qat_qp_conf.service_str = "comp";
+
+ ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
+ if (ret != 0)
+ return ret;
+
+ /* store a link to the qp in the qat_pci_device */
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
+ = *qp_addr;
+
+ qp = (struct qat_qp *)*qp_addr;
+
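+	/* Pre-compute the IOVA of each cookie's SGL tables once at setup
+	 * time, so the data path needs no per-operation virt-to-IOVA
+	 * translation.
+	 */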
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_comp_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_dst);
+ }
+
+ return ret;
+}
+
+static struct rte_mempool *
+qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
+ uint32_t num_elements)
+{
+ char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
+ "%s_xforms", comp_dev->qat_dev->name);
+
+ QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
+ mp = rte_mempool_lookup(xform_pool_name);
+
+ if (mp != NULL) {
+ QAT_LOG(DEBUG, "xformpool already created");
+ if (mp->size != num_elements) {
+ QAT_LOG(DEBUG, "xformpool wrong size - delete it");
+ rte_mempool_free(mp);
+ mp = NULL;
+ comp_dev->xformpool = NULL;
+ }
+ }
+
+ if (mp == NULL)
+ mp = rte_mempool_create(xform_pool_name,
+ num_elements,
+ qat_comp_xform_size(), 0, 0,
+ NULL, NULL, NULL, NULL, rte_socket_id(),
+ 0);
+ if (mp == NULL) {
+		QAT_LOG(ERR, "Error creating mempool %s with %d elements of size %d",
+			xform_pool_name, num_elements, qat_comp_xform_size());
+ return NULL;
+ }
+
+ return mp;
+}
+
+static void
+_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
+{
+ /* Free private_xform pool */
+ if (comp_dev->xformpool) {
+ /* Free internal mempool for private xforms */
+ rte_mempool_free(comp_dev->xformpool);
+ comp_dev->xformpool = NULL;
+ }
+}
+
+static int
+qat_comp_dev_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+ int ret = 0;
+
+ if (config->max_nb_streams != 0) {
+ QAT_LOG(ERR,
+ "QAT device does not support STATEFUL so max_nb_streams must be 0");
+ return -EINVAL;
+ }
+
+ comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
+ config->max_nb_priv_xforms);
+	if (comp_dev->xformpool == NULL) {
+		ret = -ENOMEM;
+		goto error_out;
+	}
+ return 0;
+
+error_out:
+ _qat_comp_dev_config_clear(comp_dev);
+ return ret;
+}
+
+static int
+qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
+{
+
+}
+
+static int
+qat_comp_dev_close(struct rte_compressdev *dev)
+{
+ int i;
+ int ret = 0;
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_comp_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ _qat_comp_dev_config_clear(comp_dev);
+
+ return ret;
+}
+
+
+static void
+qat_comp_dev_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *info)
+{
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+ const struct qat_qp_hw_data *comp_hw_qps =
+ qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_COMPRESSION];
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ qat_qps_per_service(comp_hw_qps,
+ QAT_SERVICE_COMPRESSION);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = comp_dev->qat_dev_capabilities;
+ }
+}
+
+static uint16_t
+qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
+ struct rte_comp_op **ops __rte_unused,
+ uint16_t nb_ops __rte_unused)
+{
+	QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version!");
+ return 0;
+}
+
+static struct rte_compressdev_ops compress_qat_dummy_ops = {
+
+ /* Device related operations */
+ .dev_configure = NULL,
+ .dev_start = NULL,
+ .dev_stop = qat_comp_dev_stop,
+ .dev_close = qat_comp_dev_close,
+ .dev_infos_get = NULL,
+
+ .stats_get = NULL,
+ .stats_reset = qat_comp_stats_reset,
+ .queue_pair_setup = NULL,
+ .queue_pair_release = qat_comp_qp_release,
+
+ /* Compression related operations */
+ .private_xform_create = NULL,
+ .private_xform_free = qat_comp_private_xform_free
+};
+
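+/* Used as the initial dequeue handler: the first response is checked for
+ * the wrong-firmware error code; if it is found, dummy enqueue/dequeue
+ * handlers and the dummy ops table are installed, effectively disabling
+ * the device, otherwise the regular dequeue handler takes over.
+ */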
+static uint16_t
+qat_comp_pmd_dequeue_frst_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+
+ if (ret) {
+ if ((*ops)->debug_status ==
+ (uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
+ tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst =
+ qat_comp_pmd_enq_deq_dummy_op_burst;
+ tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
+ qat_comp_pmd_enq_deq_dummy_op_burst;
+
+ tmp_qp->qat_dev->comp_dev->compressdev->dev_ops =
+ &compress_qat_dummy_ops;
+			QAT_LOG(ERR, "QAT PMD detected wrong FW version!");
+
+ } else {
+ tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
+ qat_comp_pmd_dequeue_op_burst;
+ }
+ }
+ return ret;
+}
+
+static struct rte_compressdev_ops compress_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_comp_dev_config,
+ .dev_start = qat_comp_dev_start,
+ .dev_stop = qat_comp_dev_stop,
+ .dev_close = qat_comp_dev_close,
+ .dev_infos_get = qat_comp_dev_info_get,
+
+ .stats_get = qat_comp_stats_get,
+ .stats_reset = qat_comp_stats_reset,
+ .queue_pair_setup = qat_comp_qp_setup,
+ .queue_pair_release = qat_comp_qp_release,
+
+ /* Compression related operations */
+ .private_xform_create = qat_comp_private_xform_create,
+ .private_xform_free = qat_comp_private_xform_free
+};
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+ if (qat_pci_dev->qat_dev_gen == QAT_GEN1) {
+ QAT_LOG(ERR, "Compression PMD not supported on QAT dh895xcc");
+ return 0;
+ }
+
+ struct rte_compressdev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ };
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ struct rte_compressdev *compressdev;
+ struct qat_comp_dev_private *comp_dev;
+
+ snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "comp");
+ QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);
+
+ compressdev = rte_compressdev_pmd_create(name,
+ &qat_pci_dev->pci_dev->device,
+ sizeof(struct qat_comp_dev_private),
+ &init_params);
+
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ compressdev->dev_ops = &compress_qat_ops;
+
+ compressdev->enqueue_burst = qat_comp_pmd_enqueue_op_burst;
+ compressdev->dequeue_burst = qat_comp_pmd_dequeue_frst_op_burst;
+
+ compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+
+ comp_dev = compressdev->data->dev_private;
+ comp_dev->qat_dev = qat_pci_dev;
+ comp_dev->compressdev = compressdev;
+ qat_pci_dev->comp_dev = comp_dev;
+
+ switch (qat_pci_dev->qat_dev_gen) {
+ case QAT_GEN1:
+ case QAT_GEN2:
+ comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+ break;
+ default:
+ comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+ QAT_LOG(DEBUG,
+ "QAT gen %d capabilities unknown, default to GEN1",
+ qat_pci_dev->qat_dev_gen);
+ break;
+ }
+
+ QAT_LOG(DEBUG,
+ "Created QAT COMP device %s as compressdev instance %d",
+ name, compressdev->data->dev_id);
+ return 0;
+}
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct qat_comp_dev_private *comp_dev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+
+ comp_dev = qat_pci_dev->comp_dev;
+ if (comp_dev == NULL)
+ return 0;
+
+ /* clean up any resources used by the device */
+ qat_comp_dev_close(comp_dev->compressdev);
+
+ rte_compressdev_pmd_destroy(comp_dev->compressdev);
+ qat_pci_dev->comp_dev = NULL;
+
+ return 0;
+}
diff --git a/drivers/compress/qat/qat_comp_pmd.h b/drivers/compress/qat/qat_comp_pmd.h
new file mode 100644
index 00000000..9ad2a283
--- /dev/null
+++ b/drivers/compress/qat/qat_comp_pmd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_COMP_PMD_H_
+#define _QAT_COMP_PMD_H_
+
+#ifdef RTE_LIBRTE_COMPRESSDEV
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "qat_device.h"
+
+/** Private data structure for a QAT compression device.
+ * This device offers only a compression service; there can be one
+ * such device on each qat_pci_device (VF).
+ */
+struct qat_comp_dev_private {
+ struct qat_pci_device *qat_dev;
+ /**< The qat pci device hosting the service */
+ struct rte_compressdev *compressdev;
+ /**< The pointer to this compression device structure */
+ const struct rte_compressdev_capabilities *qat_dev_capabilities;
+	/**< QAT device compression capabilities */
+ const struct rte_memzone *interm_buff_mz;
+ /**< The device's memory for intermediate buffers */
+ struct rte_mempool *xformpool;
+ /**< The device's pool for qat_comp_xforms */
+};
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif
+#endif /* _QAT_COMP_PMD_H_ */
diff --git a/drivers/compress/qat/rte_pmd_qat_version.map b/drivers/compress/qat/rte_pmd_qat_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/drivers/compress/qat/rte_pmd_qat_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/drivers/compress/zlib/Makefile b/drivers/compress/zlib/Makefile
new file mode 100644
index 00000000..5cf8de6f
--- /dev/null
+++ b/drivers/compress/zlib/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium Networks
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_zlib.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_zlib_version.map
+
+# external library dependencies
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lz
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/compress/zlib/meson.build b/drivers/compress/zlib/meson.build
new file mode 100644
index 00000000..7748de2d
--- /dev/null
+++ b/drivers/compress/zlib/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium Networks
+
+dep = dependency('zlib', required: false)
+if not dep.found()
+ build = false
+endif
+
+deps += 'bus_vdev'
+sources = files('zlib_pmd.c', 'zlib_pmd_ops.c')
+ext_deps += dep
+pkgconfig_extra_libs += '-lz'
+
+allow_experimental_apis = true
diff --git a/drivers/compress/zlib/rte_pmd_zlib_version.map b/drivers/compress/zlib/rte_pmd_zlib_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/drivers/compress/zlib/rte_pmd_zlib_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/drivers/compress/zlib/zlib_pmd.c b/drivers/compress/zlib/zlib_pmd.c
new file mode 100644
index 00000000..7d6871b1
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd.c
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+
+#include "zlib_pmd_private.h"
+
+/** Advance to the next mbuf in the list and assign its data buffer and
+ * length; evaluates to 0 when there is no next mbuf.
+ */
+#define COMPUTE_BUF(mbuf, data, len) \
+ ((mbuf = mbuf->next) ? \
+ (data = rte_pktmbuf_mtod(mbuf, uint8_t *)), \
+ (len = rte_pktmbuf_data_len(mbuf)) : 0)
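+/*
+ * Illustrative expansion (not part of the driver): used as a loop condition,
+ * e.g. "while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in))",
+ * each evaluation behaves roughly like:
+ *
+ *	mbuf_src = mbuf_src->next;
+ *	if (mbuf_src == NULL)
+ *		break;
+ *	strm->next_in = rte_pktmbuf_mtod(mbuf_src, uint8_t *);
+ *	strm->avail_in = rte_pktmbuf_data_len(mbuf_src);
+ *
+ * The expression's value is the new length, so an empty segment also
+ * terminates the loop.
+ */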
+
+static void
+process_zlib_deflate(struct rte_comp_op *op, z_stream *strm)
+{
+ int ret, flush, fin_flush;
+ struct rte_mbuf *mbuf_src = op->m_src;
+ struct rte_mbuf *mbuf_dst = op->m_dst;
+
+	switch (op->flush_flag) {
+	case RTE_COMP_FLUSH_FULL:
+	case RTE_COMP_FLUSH_FINAL:
+		fin_flush = Z_FINISH;
+		break;
+	default:
+		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+		ZLIB_PMD_ERR("Invalid flush value\n");
+		return;
+	}
+
+ if (unlikely(!strm)) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZLIB_PMD_ERR("Invalid z_stream\n");
+ return;
+ }
+ /* Update z_stream with the inputs provided by application */
+ strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->src.offset);
+
+ strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;
+
+ strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->dst.offset);
+
+ strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;
+
+	/* Use Z_NO_FLUSH for all but the last block of input */
+ flush = Z_NO_FLUSH;
+ /* Initialize status to SUCCESS */
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ do {
+ /* Set flush value to Z_FINISH for last block */
+ if ((op->src.length - strm->total_in) <= strm->avail_in) {
+ strm->avail_in = (op->src.length - strm->total_in);
+ flush = fin_flush;
+ }
+ do {
+ ret = deflate(strm, flush);
+ if (unlikely(ret == Z_STREAM_ERROR)) {
+ /* error return, do not process further */
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ goto def_end;
+ }
+ /* Break if Z_STREAM_END is encountered */
+ if (ret == Z_STREAM_END)
+ goto def_end;
+
+ /* Keep looping until input mbuf is consumed.
+ * Exit if destination mbuf gets exhausted.
+ */
+ } while ((strm->avail_out == 0) &&
+ COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));
+
+ if (!strm->avail_out) {
+ /* there is no space for compressed output */
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ break;
+ }
+
+ /* Update source buffer to next mbuf
+ * Exit if input buffers are fully consumed
+ */
+ } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));
+
+def_end:
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed += strm->total_in;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced += strm->total_out;
+ break;
+ default:
+ ZLIB_PMD_ERR("stats not updated for status:%d\n",
+ op->status);
+ }
+
+ deflateReset(strm);
+}
+
+static void
+process_zlib_inflate(struct rte_comp_op *op, z_stream *strm)
+{
+ int ret, flush;
+ struct rte_mbuf *mbuf_src = op->m_src;
+ struct rte_mbuf *mbuf_dst = op->m_dst;
+
+ if (unlikely(!strm)) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZLIB_PMD_ERR("Invalid z_stream\n");
+ return;
+ }
+ strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->src.offset);
+
+ strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;
+
+ strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->dst.offset);
+
+ strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;
+
+	/* The flush value provided by the application is ignored for decompression */
+ flush = Z_NO_FLUSH;
+ /* initialize status to SUCCESS */
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ do {
+ do {
+ ret = inflate(strm, flush);
+
+			switch (ret) {
+			case Z_NEED_DICT:
+ ret = Z_DATA_ERROR;
+ /* Fall-through */
+ case Z_DATA_ERROR:
+ /* Fall-through */
+ case Z_MEM_ERROR:
+ /* Fall-through */
+ case Z_STREAM_ERROR:
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ /* Fall-through */
+ case Z_STREAM_END:
+ /* no further computation needed if
+ * Z_STREAM_END is encountered
+ */
+ goto inf_end;
+ default:
+ /* success */
+ break;
+
+ }
+ /* Keep looping until input mbuf is consumed.
+ * Exit if destination mbuf gets exhausted.
+ */
+ } while ((strm->avail_out == 0) &&
+ COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));
+
+ if (!strm->avail_out) {
+ /* there is no more space for decompressed output */
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ break;
+ }
+ /* Read next input buffer to be processed, exit if compressed
+ * blocks are fully read
+ */
+ } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));
+
+inf_end:
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed += strm->total_in;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced += strm->total_out;
+ break;
+ default:
+		ZLIB_PMD_ERR("stats not updated for status:%d\n",
+				op->status);
+ }
+
+ inflateReset(strm);
+}
+
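+/* The zlib PMD completes operations synchronously at enqueue time: each op
+ * is processed here and placed on the queue pair's completion ring, which
+ * the dequeue burst simply drains.
+ */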
+/** Process comp operation for mbuf */
+static inline int
+process_zlib_op(struct zlib_qp *qp, struct rte_comp_op *op)
+{
+ struct zlib_stream *stream;
+ struct zlib_priv_xform *private_xform;
+
+ if ((op->op_type == RTE_COMP_OP_STATEFUL) ||
+ (op->src.offset > rte_pktmbuf_data_len(op->m_src)) ||
+ (op->dst.offset > rte_pktmbuf_data_len(op->m_dst))) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+		ZLIB_PMD_ERR("Invalid source or destination buffer, or "
+				"invalid operation requested\n");
+ } else {
+ private_xform = (struct zlib_priv_xform *)op->private_xform;
+ stream = &private_xform->stream;
+ stream->comp(op, &stream->strm);
+ }
+ /* whatever is out of op, put it into completion queue with
+ * its status
+ */
+ return rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
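+/*
+ * Minimal sketch (illustrative only, not part of the driver) of an xform
+ * this function accepts for compression, assuming a caller-provided stream:
+ *
+ *	struct rte_comp_xform xform = {
+ *		.type = RTE_COMP_COMPRESS,
+ *		.compress = {
+ *			.algo = RTE_COMP_ALGO_DEFLATE,
+ *			.deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
+ *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
+ *			.window_size = 15,
+ *		},
+ *	};
+ *	ret = zlib_set_stream_parameters(&xform, stream);
+ */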
+/** Parse comp xform and set private xform/Stream parameters */
+int
+zlib_set_stream_parameters(const struct rte_comp_xform *xform,
+ struct zlib_stream *stream)
+{
+ int strategy, level, wbits;
+ z_stream *strm = &stream->strm;
+
+ /* allocate deflate state */
+ strm->zalloc = Z_NULL;
+ strm->zfree = Z_NULL;
+ strm->opaque = Z_NULL;
+
+ switch (xform->type) {
+ case RTE_COMP_COMPRESS:
+ stream->comp = process_zlib_deflate;
+ stream->free = deflateEnd;
+ /** Compression window bits */
+ switch (xform->compress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
+ wbits = -(xform->compress.window_size);
+ break;
+ default:
+ ZLIB_PMD_ERR("Compression algorithm not supported\n");
+ return -1;
+ }
+ /** Compression Level */
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_PMD_DEFAULT:
+ level = Z_DEFAULT_COMPRESSION;
+ break;
+ case RTE_COMP_LEVEL_NONE:
+ level = Z_NO_COMPRESSION;
+ break;
+ case RTE_COMP_LEVEL_MIN:
+ level = Z_BEST_SPEED;
+ break;
+ case RTE_COMP_LEVEL_MAX:
+ level = Z_BEST_COMPRESSION;
+ break;
+ default:
+ level = xform->compress.level;
+ if (level < RTE_COMP_LEVEL_MIN ||
+ level > RTE_COMP_LEVEL_MAX) {
+ ZLIB_PMD_ERR("Compression level %d "
+ "not supported\n",
+ level);
+ return -1;
+ }
+ break;
+ }
+ /** Compression strategy */
+ switch (xform->compress.deflate.huffman) {
+ case RTE_COMP_HUFFMAN_DEFAULT:
+ strategy = Z_DEFAULT_STRATEGY;
+ break;
+ case RTE_COMP_HUFFMAN_FIXED:
+ strategy = Z_FIXED;
+ break;
+ case RTE_COMP_HUFFMAN_DYNAMIC:
+ strategy = Z_DEFAULT_STRATEGY;
+ break;
+ default:
+ ZLIB_PMD_ERR("Compression strategy not supported\n");
+ return -1;
+ }
+ if (deflateInit2(strm, level,
+ Z_DEFLATED, wbits,
+ DEF_MEM_LEVEL, strategy) != Z_OK) {
+ ZLIB_PMD_ERR("Deflate init failed\n");
+ return -1;
+ }
+ break;
+
+ case RTE_COMP_DECOMPRESS:
+ stream->comp = process_zlib_inflate;
+ stream->free = inflateEnd;
+ /** window bits */
+ switch (xform->decompress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
+ wbits = -(xform->decompress.window_size);
+ break;
+ default:
+			ZLIB_PMD_ERR("Decompression algorithm not supported\n");
+ return -1;
+ }
+
+ if (inflateInit2(strm, wbits) != Z_OK) {
+ ZLIB_PMD_ERR("Inflate init failed\n");
+ return -1;
+ }
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+static uint16_t
+zlib_pmd_enqueue_burst(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zlib_qp *qp = queue_pair;
+ int ret;
+ uint16_t i;
+ uint16_t enqd = 0;
+ for (i = 0; i < nb_ops; i++) {
+ ret = process_zlib_op(qp, ops[i]);
+ if (unlikely(ret < 0)) {
+ /* increment count if failed to push to completion
+ * queue
+ */
+ qp->qp_stats.enqueue_err_count++;
+ } else {
+ qp->qp_stats.enqueued_count++;
+ enqd++;
+ }
+ }
+ return enqd;
+}
+
+static uint16_t
+zlib_pmd_dequeue_burst(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zlib_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int
+zlib_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_compressdev_pmd_init_params *init_params)
+{
+ struct rte_compressdev *dev;
+
+ dev = rte_compressdev_pmd_create(name, &vdev->device,
+ sizeof(struct zlib_private), init_params);
+ if (dev == NULL) {
+ ZLIB_PMD_ERR("driver %s: create failed", init_params->name);
+ return -ENODEV;
+ }
+
+ dev->dev_ops = rte_zlib_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = zlib_pmd_dequeue_burst;
+ dev->enqueue_burst = zlib_pmd_enqueue_burst;
+
+ return 0;
+}
+
+static int
+zlib_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id()
+ };
+ const char *name;
+ const char *input_args;
+ int retval;
+
+ name = rte_vdev_device_name(vdev);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+
+ retval = rte_compressdev_pmd_parse_input_args(&init_params, input_args);
+ if (retval < 0) {
+ ZLIB_PMD_LOG(ERR,
+ "Failed to parse initialisation arguments[%s]\n",
+ input_args);
+ return -EINVAL;
+ }
+
+ return zlib_create(name, vdev, &init_params);
+}
+
+static int
+zlib_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev *compressdev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ compressdev = rte_compressdev_pmd_get_named_dev(name);
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ return rte_compressdev_pmd_destroy(compressdev);
+}
+
+static struct rte_vdev_driver zlib_pmd_drv = {
+ .probe = zlib_probe,
+ .remove = zlib_remove
+};
+
+RTE_PMD_REGISTER_VDEV(COMPRESSDEV_NAME_ZLIB_PMD, zlib_pmd_drv);
+
+RTE_INIT(zlib_init_log)
+{
+	zlib_logtype_driver = rte_log_register("pmd.compress.zlib");
+	if (zlib_logtype_driver >= 0)
+		rte_log_set_level(zlib_logtype_driver, RTE_LOG_INFO);
+}
diff --git a/drivers/compress/zlib/zlib_pmd_ops.c b/drivers/compress/zlib/zlib_pmd_ops.c
new file mode 100644
index 00000000..0a73aed9
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd_ops.c
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "zlib_pmd_private.h"
+
+static const struct rte_compressdev_capabilities zlib_pmd_capabilities[] = {
+ { /* Deflate */
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = (RTE_COMP_FF_NONCOMPRESSED_BLOCKS |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC),
+ .window_size = {
+ .min = 8,
+ .max = 15,
+ .increment = 1
+ },
+ },
+
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+
+};
+
+/** Configure device */
+static int
+zlib_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct rte_mempool *mp;
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct zlib_private *internals = dev->data->dev_private;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "stream_mp_%u", dev->data->dev_id);
+ mp = internals->mp;
+ if (mp == NULL) {
+ mp = rte_mempool_create(mp_name,
+ config->max_nb_priv_xforms +
+ config->max_nb_streams,
+ sizeof(struct zlib_priv_xform),
+ 0, 0, NULL, NULL, NULL,
+ NULL, config->socket_id,
+ 0);
+ if (mp == NULL) {
+ ZLIB_PMD_ERR("Cannot create private xform pool on "
+ "socket %d\n", config->socket_id);
+ return -ENOMEM;
+ }
+ internals->mp = mp;
+ }
+ return 0;
+}
+
+/** Start device */
+static int
+zlib_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zlib_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+}
+
+/** Close device */
+static int
+zlib_pmd_close(struct rte_compressdev *dev)
+{
+ struct zlib_private *internals = dev->data->dev_private;
+ rte_mempool_free(internals->mp);
+ internals->mp = NULL;
+ return 0;
+}
+
+/** Get device statistics */
+static void
+zlib_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zlib_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Get device info */
+static void
+zlib_pmd_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *dev_info)
+{
+ if (dev_info != NULL) {
+ dev_info->driver_name = dev->device->name;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = zlib_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+zlib_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ rte_ring_free(qp->processed_pkts);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair using the device id and qp id */
+static int
+zlib_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+ struct zlib_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "zlib_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+zlib_pmd_qp_create_processed_pkts_ring(struct zlib_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r = qp->processed_pkts;
+
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ZLIB_PMD_INFO("Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+
+ ZLIB_PMD_ERR("Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_EXACT_SZ);
+}
+
+/** Setup a queue pair */
+static int
+zlib_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct zlib_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ zlib_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ZLIB PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (zlib_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_pkts = zlib_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+	dev->data->queue_pairs[qp_id] = NULL;
+	rte_free(qp);
+	return -1;
+}
+
+/** Configure stream */
+static int
+zlib_pmd_stream_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **zstream)
+{
+ int ret = 0;
+ struct zlib_stream *stream;
+ struct zlib_private *internals = dev->data->dev_private;
+
+ if (xform == NULL) {
+ ZLIB_PMD_ERR("invalid xform struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(internals->mp, zstream)) {
+		ZLIB_PMD_ERR("Couldn't get object from private xform mempool");
+ return -ENOMEM;
+ }
+ stream = *((struct zlib_stream **)zstream);
+
+ ret = zlib_set_stream_parameters(xform, stream);
+
+ if (ret < 0) {
+		ZLIB_PMD_ERR("failed to configure stream parameters");
+
+		memset(stream, 0, sizeof(struct zlib_stream));
+		/* Return stream object to the mempool */
+ rte_mempool_put(internals->mp, stream);
+ return ret;
+ }
+
+ return 0;
+}
+
+/** Configure private xform */
+static int
+zlib_pmd_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ return zlib_pmd_stream_create(dev, xform, private_xform);
+}
+
+/** Clear the stream memory and return it to the mempool */
+static int
+zlib_pmd_stream_free(__rte_unused struct rte_compressdev *dev,
+ void *zstream)
+{
+	struct zlib_stream *stream = (struct zlib_stream *)zstream;
+
+	if (!stream)
+ return -EINVAL;
+
+ stream->free(&stream->strm);
+ /* Zero out the whole structure */
+ memset(stream, 0, sizeof(struct zlib_stream));
+ struct rte_mempool *mp = rte_mempool_from_obj(stream);
+ rte_mempool_put(mp, stream);
+
+ return 0;
+}
+
+/** Free a private xform by releasing its underlying stream */
+static int
+zlib_pmd_private_xform_free(struct rte_compressdev *dev,
+ void *private_xform)
+{
+ return zlib_pmd_stream_free(dev, private_xform);
+}
+
+struct rte_compressdev_ops zlib_pmd_ops = {
+ .dev_configure = zlib_pmd_config,
+ .dev_start = zlib_pmd_start,
+ .dev_stop = zlib_pmd_stop,
+ .dev_close = zlib_pmd_close,
+
+ .stats_get = zlib_pmd_stats_get,
+ .stats_reset = zlib_pmd_stats_reset,
+
+ .dev_infos_get = zlib_pmd_info_get,
+
+ .queue_pair_setup = zlib_pmd_qp_setup,
+ .queue_pair_release = zlib_pmd_qp_release,
+
+ .private_xform_create = zlib_pmd_private_xform_create,
+ .private_xform_free = zlib_pmd_private_xform_free,
+
+ .stream_create = NULL,
+ .stream_free = NULL
+};
+
+struct rte_compressdev_ops *rte_zlib_pmd_ops = &zlib_pmd_ops;
diff --git a/drivers/compress/zlib/zlib_pmd_private.h b/drivers/compress/zlib/zlib_pmd_private.h
new file mode 100644
index 00000000..2c6e83d4
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd_private.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef _RTE_ZLIB_PMD_PRIVATE_H_
+#define _RTE_ZLIB_PMD_PRIVATE_H_
+
+#include <zlib.h>
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#define COMPRESSDEV_NAME_ZLIB_PMD compress_zlib
+/**< ZLIB PMD device name */
+
+#define DEF_MEM_LEVEL 8
+
+int zlib_logtype_driver;
+#define ZLIB_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, zlib_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+#define ZLIB_PMD_INFO(fmt, args...) \
+ ZLIB_PMD_LOG(INFO, fmt, ## args)
+#define ZLIB_PMD_ERR(fmt, args...) \
+ ZLIB_PMD_LOG(ERR, fmt, ## args)
+#define ZLIB_PMD_WARN(fmt, args...) \
+ ZLIB_PMD_LOG(WARNING, fmt, ## args)
+
+struct zlib_private {
+ struct rte_mempool *mp;
+};
+
+struct zlib_qp {
+ struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+ struct rte_compressdev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+} __rte_cache_aligned;
+
+/* Algorithm handler function prototype */
+typedef void (*comp_func_t)(struct rte_comp_op *op, z_stream *strm);
+
+typedef int (*comp_free_t)(z_stream *strm);
+
+/** ZLIB Stream structure */
+struct zlib_stream {
+ z_stream strm;
+ /**< zlib stream structure */
+ comp_func_t comp;
+ /**< Operation (compression/decompression) */
+ comp_free_t free;
+ /**< Free Operation (compression/decompression) */
+} __rte_cache_aligned;
+
+/** ZLIB private xform structure */
+struct zlib_priv_xform {
+ struct zlib_stream stream;
+} __rte_cache_aligned;
+
+int
+zlib_set_stream_parameters(const struct rte_comp_xform *xform,
+ struct zlib_stream *stream);
+
+/** Device specific operations function pointer structure */
+extern struct rte_compressdev_ops *rte_zlib_pmd_ops;
+
+#endif /* _RTE_ZLIB_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 1d0c88ef..c480cbd3 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -8,7 +8,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
-DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 80360dd9..752e0cd6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -31,8 +31,8 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
auth_xform = xform;
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
- GCM_LOG_ERR("Only AES GMAC is supported as an "
- "authentication only algorithm");
+ AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
+ "authentication only algorithm");
return -ENOTSUP;
}
/* Set IV parameters */
@@ -54,7 +54,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
aead_xform = xform;
if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
- GCM_LOG_ERR("The only combined operation "
+ AESNI_GCM_LOG(ERR, "The only combined operation "
"supported is AES GCM");
return -ENOTSUP;
}
@@ -75,7 +75,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
sess->aad_length = aead_xform->aead.aad_length;
digest_length = aead_xform->aead.digest_length;
} else {
- GCM_LOG_ERR("Wrong xform type, has to be AEAD or authentication");
+ AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
return -ENOTSUP;
}
@@ -83,7 +83,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
/* IV check */
if (sess->iv.length != 16 && sess->iv.length != 12 &&
sess->iv.length != 0) {
- GCM_LOG_ERR("Wrong IV length");
+ AESNI_GCM_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
@@ -99,7 +99,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
sess->key = AESNI_GCM_KEY_256;
break;
default:
- GCM_LOG_ERR("Invalid key length");
+ AESNI_GCM_LOG(ERR, "Invalid key length");
return -EINVAL;
}
@@ -109,7 +109,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
if (digest_length != 16 &&
digest_length != 12 &&
digest_length != 8) {
- GCM_LOG_ERR("digest");
+ AESNI_GCM_LOG(ERR, "Invalid digest length");
return -EINVAL;
}
sess->digest_length = digest_length;
@@ -127,7 +127,7 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(sym_op->session != NULL))
sess = (struct aesni_gcm_session *)
- get_session_private_data(
+ get_sym_session_private_data(
sym_op->session,
cryptodev_driver_id);
} else {
@@ -149,8 +149,8 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(sym_op->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(sym_op->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -464,13 +464,13 @@ aesni_gcm_create(const char *name,
/* Check CPU for support for AES instruction set */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
- GCM_LOG_ERR("AES instructions not supported by CPU");
+ AESNI_GCM_LOG(ERR, "AES instructions not supported by CPU");
return -EFAULT;
}
-
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- GCM_LOG_ERR("driver %s: create failed", init_params->name);
+ AESNI_GCM_LOG(ERR, "driver %s: create failed",
+ init_params->name);
return -ENODEV;
}
@@ -492,7 +492,8 @@ aesni_gcm_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_CPU_AESNI |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
switch (vector_mode) {
case RTE_AESNI_GCM_SSE:
@@ -513,7 +514,13 @@ aesni_gcm_create(const char *name,
internals->vector_mode = vector_mode;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+ imb_get_version_str());
+#else
+ AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
+#endif
return 0;
}
@@ -525,8 +532,7 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct aesni_gcm_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -568,7 +574,12 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
cryptodev_driver_id);
+
+RTE_INIT(aesni_gcm_init_log)
+{
+ aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm");
+}
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 6f542137..b6b4dd02 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -143,7 +143,8 @@ aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
dev_info->capabilities = aesni_gcm_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
}
}
@@ -183,12 +184,11 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- GCM_LOG_INFO("Reusing existing ring %s for processed"
- " packets", qp->name);
+ AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed"
+ " packets", qp->name);
return r;
}
-
- GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
+ AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed"
" packets", qp->name);
return NULL;
}
@@ -242,22 +242,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
@@ -267,14 +251,14 @@ aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the aesni gcm session structure */
static unsigned
-aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct aesni_gcm_session);
}
/** Configure a aesni gcm session from a crypto xform chain */
static int
-aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -284,26 +268,26 @@ aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
struct aesni_gcm_private *internals = dev->data->dev_private;
if (unlikely(sess == NULL)) {
- GCM_LOG_ERR("invalid session struct");
+ AESNI_GCM_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ AESNI_GCM_LOG(ERR,
+ "Couldn't get object from session mempool");
return -ENOMEM;
}
ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
sess_private_data, xform);
if (ret != 0) {
- GCM_LOG_ERR("failed configure session parameters");
+ AESNI_GCM_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -311,17 +295,17 @@ aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev,
+aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -339,13 +323,11 @@ struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
.queue_pair_setup = aesni_gcm_pmd_qp_setup,
.queue_pair_release = aesni_gcm_pmd_qp_release,
- .queue_pair_start = aesni_gcm_pmd_qp_start,
- .queue_pair_stop = aesni_gcm_pmd_qp_stop,
.queue_pair_count = aesni_gcm_pmd_qp_count,
- .session_get_size = aesni_gcm_pmd_session_get_size,
- .session_configure = aesni_gcm_pmd_session_configure,
- .session_clear = aesni_gcm_pmd_session_clear
+ .sym_session_get_size = aesni_gcm_pmd_sym_session_get_size,
+ .sym_session_configure = aesni_gcm_pmd_sym_session_configure,
+ .sym_session_clear = aesni_gcm_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 3d60583b..c13a12a5 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -7,28 +7,24 @@
#include "aesni_gcm_ops.h"
+/*
+ * The IMB_VERSION_NUM macro was introduced in Multi-buffer version 0.50,
+ * so if the macro is not defined, the library version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
/**< AES-NI GCM PMD device name */
-#define GCM_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
-#define GCM_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
- __func__, __LINE__, ## args)
-
-#define GCM_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define GCM_LOG_INFO(fmt, args...)
-#define GCM_LOG_DBG(fmt, args...)
-#endif
+/** AES-NI GCM PMD LOGTYPE DRIVER */
+int aesni_gcm_logtype_driver;
+#define AESNI_GCM_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, aesni_gcm_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
/* Maximum length for digest */
#define DIGEST_LENGTH_MAX 16
@@ -39,8 +35,6 @@ struct aesni_gcm_private {
/**< Vector mode */
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
};
struct aesni_gcm_qp {
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index bb35c66a..93dc7a44 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -108,7 +108,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
}
if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
- MB_LOG_ERR("Crypto xform struct not of type auth");
+ AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
return -1;
}
@@ -161,7 +161,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
hash_oneblock_fn = mb_ops->aux.one_block.sha512;
break;
default:
- MB_LOG_ERR("Unsupported authentication algorithm selection");
+ AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
return -ENOTSUP;
}
@@ -182,6 +182,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
const struct rte_crypto_sym_xform *xform)
{
uint8_t is_aes = 0;
+ uint8_t is_3DES = 0;
aes_keyexp_t aes_keyexp_fn;
if (xform == NULL) {
@@ -190,7 +191,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
}
if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
- MB_LOG_ERR("Crypto xform struct not of type cipher");
+ AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
return -EINVAL;
}
@@ -203,7 +204,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->cipher.direction = DECRYPT;
break;
default:
- MB_LOG_ERR("Invalid cipher operation parameter");
+ AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
return -EINVAL;
}
@@ -227,8 +228,12 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
sess->cipher.mode = DOCSIS_DES;
break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ sess->cipher.mode = DES3;
+ is_3DES = 1;
+ break;
default:
- MB_LOG_ERR("Unsupported cipher mode parameter");
+ AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
return -ENOTSUP;
}
@@ -252,7 +257,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
break;
default:
- MB_LOG_ERR("Invalid cipher key length");
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
return -EINVAL;
}
@@ -261,9 +266,52 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->cipher.expanded_aes_keys.encode,
sess->cipher.expanded_aes_keys.decode);
+ } else if (is_3DES) {
+ uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
+ sess->cipher.exp_3des_keys.key[1],
+ sess->cipher.exp_3des_keys.key[2]};
+
+ switch (xform->cipher.key.length) {
+ case 24:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+ des_key_schedule(keys[1], xform->cipher.key.data+8);
+ des_key_schedule(keys[2], xform->cipher.key.data+16);
+
+ /* Initialize keys - 24 bytes: [K1-K2-K3] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
+ break;
+ case 16:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+ des_key_schedule(keys[1], xform->cipher.key.data+8);
+
+ /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+ break;
+ case 8:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+
+ /* Initialize keys - 8 bytes: [K1 = K2 = K3] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
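+	/* key_length_in_bytes follows the convention of the linked IPSec
+	 * Multi-buffer release: 0.50+ expects the total 3DES key material
+	 * length (24 bytes), older releases the single DES key size (8).
+	 */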
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ sess->cipher.key_length_in_bytes = 24;
+#else
+ sess->cipher.key_length_in_bytes = 8;
+#endif
} else {
if (xform->cipher.key.length != 8) {
- MB_LOG_ERR("Invalid cipher key length");
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
return -EINVAL;
}
sess->cipher.key_length_in_bytes = 8;
@@ -294,7 +342,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
break;
default:
- MB_LOG_ERR("Invalid aead operation parameter");
+ AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
return -EINVAL;
}
@@ -304,7 +352,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->auth.algo = AES_CCM;
break;
default:
- MB_LOG_ERR("Unsupported aead mode parameter");
+ AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
return -ENOTSUP;
}
@@ -320,7 +368,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
break;
default:
- MB_LOG_ERR("Invalid cipher key length");
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
return -EINVAL;
}
@@ -391,7 +439,7 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
break;
case AESNI_MB_OP_NOT_SUPPORTED:
default:
- MB_LOG_ERR("Unsupported operation chain order parameter");
+ AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
return -ENOTSUP;
}
@@ -400,14 +448,14 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
if (ret != 0) {
- MB_LOG_ERR("Invalid/unsupported authentication parameters");
+ AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
return ret;
}
ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
cipher_xform);
if (ret != 0) {
- MB_LOG_ERR("Invalid/unsupported cipher parameters");
+ AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
return ret;
}
@@ -415,7 +463,7 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
ret = aesni_mb_set_session_aead_parameters(mb_ops, sess,
aead_xform);
if (ret != 0) {
- MB_LOG_ERR("Invalid/unsupported aead parameters");
+ AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
return ret;
}
}
@@ -458,7 +506,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(op->sym->session != NULL))
sess = (struct aesni_mb_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
} else {
@@ -480,8 +528,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -524,8 +572,20 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
job->cipher_mode = session->cipher.mode;
job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
- job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
- job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;
+
+ if (job->cipher_mode == DES3) {
+ job->aes_enc_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ job->aes_dec_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ } else {
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ }
+
/* Set authentication parameters */
@@ -555,7 +615,7 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
char *odata = rte_pktmbuf_append(m_dst,
rte_pktmbuf_data_len(op->sym->m_src));
if (odata == NULL) {
- MB_LOG_ERR("failed to allocate space in destination "
+ AESNI_MB_LOG(ERR, "failed to allocate space in destination "
"mbuf for source data");
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
return -1;
@@ -658,7 +718,7 @@ static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
- struct aesni_mb_session *sess = get_session_private_data(
+ struct aesni_mb_session *sess = get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
@@ -721,7 +781,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
if (processed_jobs == nb_ops)
break;
- job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.get_completed_job)(qp->mb_mgr);
}
return processed_jobs;
@@ -734,7 +794,7 @@ flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
int processed_ops = 0;
/* Flush the remaining jobs */
- JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);
+ JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(qp->mb_mgr);
if (job)
processed_ops += handle_completed_jobs(qp, job,
@@ -779,14 +839,14 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
break;
/* Get next free mb job struct from mb manager */
- job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
if (unlikely(job == NULL)) {
/* if no free mb job structs we need to flush mb_mgr */
processed_jobs += flush_mb_mgr(qp,
&ops[processed_jobs],
(nb_ops - processed_jobs) - 1);
- job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
}
retval = set_mb_job_params(job, qp, op, &digest_idx);
@@ -796,7 +856,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
}
/* Submit job to multi-buffer for processing */
- job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.submit)(qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
@@ -832,13 +892,13 @@ cryptodev_aesni_mb_create(const char *name,
/* Check CPU for support for AES instruction set */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
- MB_LOG_ERR("AES instructions not supported by CPU");
+ AESNI_MB_LOG(ERR, "AES instructions not supported by CPU");
return -EFAULT;
}
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- MB_LOG_ERR("failed to create cryptodev vdev");
+ AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
return -ENODEV;
}
@@ -885,7 +945,13 @@ cryptodev_aesni_mb_create(const char *name,
internals->vector_mode = vector_mode;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+ imb_get_version_str());
+#else
+ AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
+#endif
return 0;
}
@@ -897,8 +963,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct aesni_mb_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name, *args;
int retval;
@@ -911,7 +976,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
if (retval) {
- MB_LOG_ERR("Failed to parse initialisation arguments[%s]\n",
+ AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
args);
return -EINVAL;
}
@@ -947,8 +1012,12 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
cryptodev_aesni_mb_pmd_drv.driver,
cryptodev_driver_id);
+
+RTE_INIT(aesni_mb_init_log)
+{
+ aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb");
+}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 01530523..ab26e5ae 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -239,6 +239,26 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
{ /* DES DOCSIS BPI */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -387,7 +407,8 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = aesni_mb_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	/* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
}
}
@@ -402,6 +423,8 @@ aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
r = rte_ring_lookup(qp->name);
if (r)
rte_ring_free(r);
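+	/* free the per-queue-pair multi-buffer manager allocated in qp_setup */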
+ if (qp->mb_mgr)
+ free_mb_mgr(qp->mb_mgr);
rte_free(qp);
dev->data->queue_pairs[qp_id] = NULL;
}
@@ -441,12 +464,12 @@ aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
r = rte_ring_lookup(ring_name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- MB_LOG_INFO("Reusing existing ring %s for processed ops",
+ AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
ring_name);
return r;
}
- MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
+ AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
ring_name);
return NULL;
}
@@ -463,6 +486,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
{
struct aesni_mb_qp *qp = NULL;
struct aesni_mb_private *internals = dev->data->dev_private;
+ int ret = -1;
/* Free memory prior to re-allocation if needed. */
if (dev->data->queue_pairs[qp_id] != NULL)
@@ -481,12 +505,20 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
goto qp_setup_cleanup;
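+	/* each queue pair now owns its own dynamically allocated MB_MGR instance */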
+ qp->mb_mgr = alloc_mb_mgr(0);
+ if (qp->mb_mgr == NULL) {
+ ret = -ENOMEM;
+ goto qp_setup_cleanup;
+ }
+
qp->op_fns = &job_ops[internals->vector_mode];
qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
"ingress", qp_conf->nb_descriptors, socket_id);
- if (qp->ingress_queue == NULL)
+ if (qp->ingress_queue == NULL) {
+ ret = -1;
goto qp_setup_cleanup;
+ }
qp->sess_mp = session_pool;
@@ -498,30 +530,17 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
"digest_mp_%u_%u", dev->data->dev_id, qp_id);
/* Initialise multi-buffer manager */
- (*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
+ (*qp->op_fns->job.init_mgr)(qp->mb_mgr);
return 0;
qp_setup_cleanup:
- if (qp)
+ if (qp) {
+		if (qp->mb_mgr)
+ free_mb_mgr(qp->mb_mgr);
rte_free(qp);
+ }
- return -1;
-}
-
-/** Start queue pair */
-static int
-aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
+ return ret;
}
/** Return the number of allocated queue pairs */
@@ -533,14 +552,14 @@ aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the aesni multi-buffer session structure */
static unsigned
-aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct aesni_mb_session);
}
/** Configure a aesni multi-buffer session from a crypto xform chain */
static int
-aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
+aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -550,27 +569,27 @@ aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
int ret;
if (unlikely(sess == NULL)) {
- MB_LOG_ERR("invalid session struct");
+ AESNI_MB_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ AESNI_MB_LOG(ERR,
+ "Couldn't get object from session mempool");
return -ENOMEM;
}
ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
sess_private_data, xform);
if (ret != 0) {
- MB_LOG_ERR("failed configure session parameters");
+ AESNI_MB_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -578,17 +597,17 @@ aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-aesni_mb_pmd_session_clear(struct rte_cryptodev *dev,
+aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct aesni_mb_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -606,13 +625,11 @@ struct rte_cryptodev_ops aesni_mb_pmd_ops = {
.queue_pair_setup = aesni_mb_pmd_qp_setup,
.queue_pair_release = aesni_mb_pmd_qp_release,
- .queue_pair_start = aesni_mb_pmd_qp_start,
- .queue_pair_stop = aesni_mb_pmd_qp_stop,
.queue_pair_count = aesni_mb_pmd_qp_count,
- .session_get_size = aesni_mb_pmd_session_get_size,
- .session_configure = aesni_mb_pmd_session_configure,
- .session_clear = aesni_mb_pmd_session_clear
+ .sym_session_get_size = aesni_mb_pmd_sym_session_get_size,
+ .sym_session_configure = aesni_mb_pmd_sym_session_configure,
+ .sym_session_clear = aesni_mb_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index a33b2f69..70e9d18e 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -7,28 +7,26 @@
#include "aesni_mb_ops.h"
+/*
+ * The IMB_VERSION_NUM macro was introduced in Multi-buffer 0.50,
+ * so if it is not defined, the library version is assumed to be 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< AES-NI Multi buffer PMD device name */
-#define MB_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
-#define MB_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- CRYPTODEV_NAME_AESNI_MB_PMD, \
- __func__, __LINE__, ## args)
-
-#define MB_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- CRYPTODEV_NAME_AESNI_MB_PMD, \
- __func__, __LINE__, ## args)
-#else
-#define MB_LOG_INFO(fmt, args...)
-#define MB_LOG_DBG(fmt, args...)
-#endif
+/** AESNI_MB PMD LOGTYPE DRIVER */
+extern int aesni_mb_logtype_driver;
+
+#define AESNI_MB_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, aesni_mb_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
#define HMAC_IPAD_VALUE (0x36)
#define HMAC_OPAD_VALUE (0x5C)
@@ -124,8 +122,6 @@ struct aesni_mb_private {
/**< CPU vector instruction set mode */
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
};
/** AESNI Multi buffer queue pair */
@@ -136,7 +132,7 @@ struct aesni_mb_qp {
/**< Unique Queue Pair Name */
const struct aesni_mb_op_fns *op_fns;
/**< Vector mode dependent pointer table of the multi-buffer APIs */
- MB_MGR mb_mgr;
+ MB_MGR *mb_mgr;
/**< Multi-buffer instance */
struct rte_ring *ingress_queue;
/**< Ring for placing operations ready for processing */
@@ -173,12 +169,18 @@ struct aesni_mb_session {
uint64_t key_length_in_bytes;
- struct {
- uint32_t encode[60] __rte_aligned(16);
- /**< encode key */
- uint32_t decode[60] __rte_aligned(16);
- /**< decode key */
- } expanded_aes_keys;
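+	/* AES and 3DES keys are never used by the same session, so share the storage */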
+ union {
+ struct {
+ uint32_t encode[60] __rte_aligned(16);
+ /**< encode key */
+ uint32_t decode[60] __rte_aligned(16);
+ /**< decode key */
+ } expanded_aes_keys;
+ struct {
+ const void *ks_ptr[3];
+ uint64_t key[3][16];
+ } exp_3des_keys;
+ };
/**< Expanded AES keys - Allocating space to
* contain the maximum expanded key size which
* is 240 bytes for 256 bit AES, calculate by:
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index fbb08f72..9d15fee5 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -502,7 +502,7 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
/* get existing session */
if (likely(op->sym->session != NULL)) {
sess = (struct armv8_crypto_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
}
@@ -526,8 +526,8 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -779,7 +779,6 @@ cryptodev_armv8_crypto_create(const char *name,
internals = dev->data->dev_private;
internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
@@ -800,8 +799,7 @@ cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
"",
sizeof(struct armv8_crypto_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -848,7 +846,6 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
index c64aef09..ae03117e 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -154,7 +154,8 @@ armv8_crypto_pmd_info_get(struct rte_cryptodev *dev,
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = armv8_crypto_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	/* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
}
}
@@ -257,22 +258,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-armv8_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-armv8_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev)
@@ -282,14 +267,14 @@ armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the session structure */
static unsigned
-armv8_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+armv8_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct armv8_crypto_session);
}
/** Configure the session from a crypto xform chain */
static int
-armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev,
+armv8_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -317,7 +302,7 @@ armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev,
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -325,17 +310,17 @@ armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+armv8_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct armv8_crypto_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -353,13 +338,11 @@ struct rte_cryptodev_ops armv8_crypto_pmd_ops = {
.queue_pair_setup = armv8_crypto_pmd_qp_setup,
.queue_pair_release = armv8_crypto_pmd_qp_release,
- .queue_pair_start = armv8_crypto_pmd_qp_start,
- .queue_pair_stop = armv8_crypto_pmd_qp_stop,
.queue_pair_count = armv8_crypto_pmd_qp_count,
- .session_get_size = armv8_crypto_pmd_session_get_size,
- .session_configure = armv8_crypto_pmd_session_configure,
- .session_clear = armv8_crypto_pmd_session_clear
+ .sym_session_get_size = armv8_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = armv8_crypto_pmd_sym_session_configure,
+ .sym_session_clear = armv8_crypto_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops = &armv8_crypto_pmd_ops;
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h
index b8966e93..7feb021d 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_private.h
+++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -106,8 +106,6 @@ typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *);
struct armv8_crypto_private {
unsigned int max_nb_qpairs;
/**< Max number of queue pairs */
- unsigned int max_nb_sessions;
- /**< Max number of sessions */
};
/** ARMv8 crypto queue pair */
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 3ce0f39f..19ae9153 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -1566,7 +1566,7 @@ ccp_perform_hmac(struct rte_crypto_op *op,
void *append_ptr;
uint8_t *addr;
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
addr = session->auth.pre_compute;
@@ -1739,7 +1739,7 @@ ccp_perform_sha(struct rte_crypto_op *op,
void *append_ptr;
uint64_t auth_msg_bits;
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
@@ -1828,7 +1828,7 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
uint32_t tail;
phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
@@ -1968,7 +1968,7 @@ ccp_perform_sha3(struct rte_crypto_op *op,
uint32_t tail;
phys_addr_t src_addr, dest_addr, ctx_paddr;
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
@@ -2036,7 +2036,7 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
phys_addr_t src_addr, dest_addr, key_addr;
int length, non_align_len;
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
key_addr = rte_mem_virt2phy(session->auth.key_ccp);
@@ -2188,7 +2188,7 @@ ccp_perform_aes(struct rte_crypto_op *op,
phys_addr_t src_addr, dest_addr, key_addr;
uint8_t *iv;
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
function.raw = 0;
@@ -2276,7 +2276,7 @@ ccp_perform_3des(struct rte_crypto_op *op,
uint8_t *iv;
phys_addr_t src_addr, dest_addr, key_addr;
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
@@ -2379,7 +2379,7 @@ ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
phys_addr_t digest_dest_addr;
int length, non_align_len;
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
@@ -2546,7 +2546,7 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
int result = 0;
struct ccp_session *session;
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
@@ -2584,7 +2584,7 @@ ccp_crypto_auth(struct rte_crypto_op *op,
int result = 0;
struct ccp_session *session;
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
@@ -2654,7 +2654,7 @@ ccp_crypto_aead(struct rte_crypto_op *op,
int result = 0;
struct ccp_session *session;
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
@@ -2711,7 +2711,7 @@ process_ops_to_enqueue(struct ccp_qp *qp,
b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
Q_DESC_SIZE);
for (i = 0; i < nb_ops; i++) {
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op[i]->sym->session,
ccp_cryptodev_driver_id);
switch (session->cmd_id) {
@@ -2787,7 +2787,7 @@ static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
int offset, digest_offset;
uint8_t digest_le[64];
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
@@ -2863,7 +2863,7 @@ ccp_prepare_ops(struct ccp_qp *qp,
for (i = 0; i < min_ops; i++) {
op_d[i] = b_info->op[b_info->op_idx++];
- session = (struct ccp_session *)get_session_private_data(
+ session = (struct ccp_session *)get_sym_session_private_data(
op_d[i]->sym->session,
ccp_cryptodev_driver_id);
switch (session->cmd_id) {
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 80b75ccb..6984913f 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -624,7 +624,8 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
if (internals->auth_opt == 1)
dev_info->capabilities = ccp_crypto_cap_complete;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	/* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
}
}
@@ -747,20 +748,6 @@ qp_setup_cleanup:
return -1;
}
-static int
-ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
- uint16_t queue_pair_id __rte_unused)
-{
- return -ENOTSUP;
-}
-
-static int
-ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
- uint16_t queue_pair_id __rte_unused)
-{
- return -ENOTSUP;
-}
-
static uint32_t
ccp_pmd_qp_count(struct rte_cryptodev *dev)
{
@@ -768,13 +755,13 @@ ccp_pmd_qp_count(struct rte_cryptodev *dev)
}
static unsigned
-ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+ccp_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct ccp_session);
}
static int
-ccp_pmd_session_configure(struct rte_cryptodev *dev,
+ccp_pmd_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -801,25 +788,25 @@ ccp_pmd_session_configure(struct rte_cryptodev *dev,
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
}
static void
-ccp_pmd_session_clear(struct rte_cryptodev *dev,
+ccp_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
if (sess_priv) {
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
rte_mempool_put(sess_mp, sess_priv);
memset(sess_priv, 0, sizeof(struct ccp_session));
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
}
}
@@ -836,13 +823,11 @@ struct rte_cryptodev_ops ccp_ops = {
.queue_pair_setup = ccp_pmd_qp_setup,
.queue_pair_release = ccp_pmd_qp_release,
- .queue_pair_start = ccp_pmd_qp_start,
- .queue_pair_stop = ccp_pmd_qp_stop,
.queue_pair_count = ccp_pmd_qp_count,
- .session_get_size = ccp_pmd_session_get_size,
- .session_configure = ccp_pmd_session_configure,
- .session_clear = ccp_pmd_session_clear,
+ .sym_session_get_size = ccp_pmd_sym_session_get_size,
+ .sym_session_configure = ccp_pmd_sym_session_configure,
+ .sym_session_clear = ccp_pmd_sym_session_clear,
};
struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
index f4498048..79752f68 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -40,7 +40,6 @@
/* private data structure for each CCP crypto device */
struct ccp_private {
unsigned int max_nb_qpairs; /**< Max number of queue pairs */
- unsigned int max_nb_sessions; /**< Max number of sessions */
uint8_t crypto_num_dev; /**< Number of working crypto devices */
bool auth_opt; /**< Authentication offload option */
struct ccp_device *last_dev; /**< Last working crypto device */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 2061f465..92d8a955 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -30,14 +30,12 @@ struct ccp_pmd_init_params {
#define CCP_CRYPTODEV_PARAM_NAME ("name")
#define CCP_CRYPTODEV_PARAM_SOCKET_ID ("socket_id")
#define CCP_CRYPTODEV_PARAM_MAX_NB_QP ("max_nb_queue_pairs")
-#define CCP_CRYPTODEV_PARAM_MAX_NB_SESS ("max_nb_sessions")
#define CCP_CRYPTODEV_PARAM_AUTH_OPT ("ccp_auth_opt")
const char *ccp_pmd_valid_params[] = {
CCP_CRYPTODEV_PARAM_NAME,
CCP_CRYPTODEV_PARAM_SOCKET_ID,
CCP_CRYPTODEV_PARAM_MAX_NB_QP,
- CCP_CRYPTODEV_PARAM_MAX_NB_SESS,
CCP_CRYPTODEV_PARAM_AUTH_OPT,
};
@@ -125,13 +123,6 @@ ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params,
goto free_kvlist;
ret = rte_kvargs_process(kvlist,
- CCP_CRYPTODEV_PARAM_MAX_NB_SESS,
- &parse_integer_arg,
- &params->def_p.max_nb_sessions);
- if (ret < 0)
- goto free_kvlist;
-
- ret = rte_kvargs_process(kvlist,
CCP_CRYPTODEV_PARAM_SOCKET_ID,
&parse_integer_arg,
&params->def_p.socket_id);
@@ -169,7 +160,7 @@ get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
return NULL;
sess = (struct ccp_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
ccp_cryptodev_driver_id);
} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
@@ -192,7 +183,7 @@ get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session,
+ set_sym_session_private_data(op->sym->session,
ccp_cryptodev_driver_id,
_sess_private_data);
}
@@ -334,7 +325,6 @@ cryptodev_ccp_create(const char *name,
internals = dev->data->dev_private;
internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->def_p.max_nb_sessions;
internals->auth_opt = init_params->auth_opt;
internals->crypto_num_dev = cryptodev_cnt;
@@ -359,8 +349,7 @@ cryptodev_ccp_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct ccp_private),
rte_socket_id(),
- CCP_PMD_MAX_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ CCP_PMD_MAX_QUEUE_PAIRS
},
.auth_opt = CCP_PMD_AUTH_OPT_CCP,
};
@@ -382,8 +371,6 @@ cryptodev_ccp_probe(struct rte_vdev_device *vdev)
init_params.def_p.socket_id);
RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
init_params.def_p.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, "Max number of sessions = %d\n",
- init_params.def_p.max_nb_sessions);
RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
((init_params.auth_opt == 0) ? "CCP" : "CPU"));
@@ -404,7 +391,6 @@ static struct cryptodev_driver ccp_crypto_drv;
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int> "
"ccp_auth_opt=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 56fa969d..2a3c61c6 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1080,7 +1080,7 @@ build_sec_fd(struct rte_crypto_op *op,
PMD_INIT_FUNC_TRACE();
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
- sess = (dpaa2_sec_session *)get_session_private_data(
+ sess = (dpaa2_sec_session *)get_sym_session_private_data(
op->sym->session, cryptodev_driver_id);
else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
sess = (dpaa2_sec_session *)get_sec_session_private_data(
@@ -1470,26 +1470,6 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return retcode;
}
-/** Start queue pair */
-static int
-dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
-/** Stop queue pair */
-static int
-dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
@@ -1501,7 +1481,7 @@ dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
/** Returns the size of the aesni gcm session structure */
static unsigned int
-dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
PMD_INIT_FUNC_TRACE();
@@ -2456,7 +2436,7 @@ dpaa2_sec_security_session_destroy(void *dev __rte_unused,
}
static int
-dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -2477,7 +2457,7 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -2485,12 +2465,12 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-dpaa2_sec_session_clear(struct rte_cryptodev *dev,
+dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
PMD_INIT_FUNC_TRACE();
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
if (sess_priv) {
@@ -2499,7 +2479,7 @@ dpaa2_sec_session_clear(struct rte_cryptodev *dev,
rte_free(s->auth_key.data);
memset(sess, 0, sizeof(dpaa2_sec_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -2626,7 +2606,8 @@ dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
info->feature_flags = dev->feature_flags;
info->capabilities = dpaa2_sec_capabilities;
- info->sym.max_nb_sessions = internals->max_nb_sessions;
+	/* No limit on the number of sessions */
+ info->sym.max_nb_sessions = 0;
info->driver_id = cryptodev_driver_id;
}
}
@@ -2715,12 +2696,10 @@ static struct rte_cryptodev_ops crypto_ops = {
.stats_reset = dpaa2_sec_stats_reset,
.queue_pair_setup = dpaa2_sec_queue_pair_setup,
.queue_pair_release = dpaa2_sec_queue_pair_release,
- .queue_pair_start = dpaa2_sec_queue_pair_start,
- .queue_pair_stop = dpaa2_sec_queue_pair_stop,
.queue_pair_count = dpaa2_sec_queue_pair_count,
- .session_get_size = dpaa2_sec_session_get_size,
- .session_configure = dpaa2_sec_session_configure,
- .session_clear = dpaa2_sec_session_clear,
+ .sym_session_get_size = dpaa2_sec_sym_session_get_size,
+ .sym_session_configure = dpaa2_sec_sym_session_configure,
+ .sym_session_clear = dpaa2_sec_sym_session_clear,
};
static const struct rte_security_capability *
@@ -2783,10 +2762,13 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_SECURITY |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
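+			/* the coarse MBUF_SCATTER_GATHER flag is replaced by per-layout SGL flags */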
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
internals = cryptodev->data->dev_private;
- internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
/*
* For secondary processes, we don't initialise any further as primary
@@ -2940,9 +2922,7 @@ RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
-RTE_INIT(dpaa2_sec_init_log);
-static void
-dpaa2_sec_init_log(void)
+RTE_INIT(dpaa2_sec_init_log)
{
/* Bus level logs */
dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
index 7c1f5e73..8a990442 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -18,7 +18,7 @@ extern int dpaa2_logtype_sec;
rte_log(RTE_LOG_DEBUG, dpaa2_logtype_sec, "dpaa2_sec: %s(): " \
fmt "\n", __func__, ##args)
-#define PMD_INIT_FUNC_TRACE() DPAA2_SEC_LOG(DEBUG, " >>")
+#define PMD_INIT_FUNC_TRACE() DPAA2_SEC_DEBUG(">>")
#define DPAA2_SEC_INFO(fmt, args...) \
DPAA2_SEC_LOG(INFO, fmt, ## args)
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index a9d83ebc..d015be1e 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -23,8 +23,6 @@ struct dpaa2_sec_dev_private {
uint16_t token; /**< Token required by DPxxx objects */
unsigned int max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned int max_nb_sessions;
- /**< Max number of sessions supported by device */
};
struct dpaa2_sec_qp {
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 06f7e437..f571050b 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -526,12 +526,25 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
struct qman_fq *fq;
unsigned int pkts = 0;
- int ret;
+ int num_rx_bufs, ret;
struct qm_dqrr_entry *dq;
+ uint32_t vdqcr_flags = 0;
fq = &qp->outq;
- ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
- DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
+ /*
+	 * For requests of fewer than four buffers, we provide the exact
+	 * number of buffers and set the QM_VDQCR_EXACT flag.
+	 * Otherwise we leave QM_VDQCR_EXACT unset; the portal may then
+	 * return up to two more buffers than requested, so we request
+	 * two fewer in this case.
+ */
+ if (nb_ops < 4) {
+ vdqcr_flags = QM_VDQCR_EXACT;
+ num_rx_bufs = nb_ops;
+ } else {
+ num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+ (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
+ }
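+	/* e.g. nb_ops = 16: request 14 frames; without QM_VDQCR_EXACT up to 16 may arrive */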
+ ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
if (ret)
return 0;
@@ -1416,7 +1429,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
switch (op->sess_type) {
case RTE_CRYPTO_OP_WITH_SESSION:
ses = (dpaa_sec_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
break;
@@ -1585,26 +1598,6 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return 0;
}
-/** Start queue pair */
-static int
-dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
-/** Stop queue pair */
-static int
-dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
@@ -1616,7 +1609,7 @@ dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
/** Returns the size of session structure */
static unsigned int
-dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
PMD_INIT_FUNC_TRACE();
@@ -1755,34 +1748,6 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
}
static int
-dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
- uint16_t qp_id __rte_unused,
- void *ses __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- return 0;
-}
-
-static int
-dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
- uint16_t qp_id __rte_unused,
- void *ses)
-{
- dpaa_sec_session *sess = ses;
- struct dpaa_sec_dev_private *qi = dev->data->dev_private;
-
- PMD_INIT_FUNC_TRACE();
-
- if (sess->inq)
- dpaa_sec_detach_rxq(qi, sess->inq);
- sess->inq = NULL;
-
- sess->qp = NULL;
-
- return 0;
-}
-
-static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *sess)
{
@@ -1859,7 +1824,7 @@ err1:
}
static int
-dpaa_sec_session_configure(struct rte_cryptodev *dev,
+dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -1883,7 +1848,7 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev,
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
@@ -1892,12 +1857,12 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-dpaa_sec_session_clear(struct rte_cryptodev *dev,
+dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
struct dpaa_sec_dev_private *qi = dev->data->dev_private;
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
PMD_INIT_FUNC_TRACE();
@@ -1911,7 +1876,7 @@ dpaa_sec_session_clear(struct rte_cryptodev *dev,
rte_free(s->cipher_key.data);
rte_free(s->auth_key.data);
memset(s, 0, sizeof(dpaa_sec_session));
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -2215,9 +2180,6 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
info->feature_flags = dev->feature_flags;
info->capabilities = dpaa_sec_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->sym.max_nb_sessions_per_qp =
- RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
- RTE_DPAA_MAX_NB_SEC_QPS;
info->driver_id = cryptodev_driver_id;
}
}
@@ -2230,14 +2192,10 @@ static struct rte_cryptodev_ops crypto_ops = {
.dev_infos_get = dpaa_sec_dev_infos_get,
.queue_pair_setup = dpaa_sec_queue_pair_setup,
.queue_pair_release = dpaa_sec_queue_pair_release,
- .queue_pair_start = dpaa_sec_queue_pair_start,
- .queue_pair_stop = dpaa_sec_queue_pair_stop,
.queue_pair_count = dpaa_sec_queue_pair_count,
- .session_get_size = dpaa_sec_session_get_size,
- .session_configure = dpaa_sec_session_configure,
- .session_clear = dpaa_sec_session_clear,
- .qp_attach_session = dpaa_sec_qp_attach_sess,
- .qp_detach_session = dpaa_sec_qp_detach_sess,
+ .sym_session_get_size = dpaa_sec_sym_session_get_size,
+ .sym_session_configure = dpaa_sec_sym_session_configure,
+ .sym_session_clear = dpaa_sec_sym_session_clear
};
static const struct rte_security_capability *
@@ -2296,7 +2254,11 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_SECURITY |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
internals = cryptodev->data->dev_private;
internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
@@ -2449,9 +2411,7 @@ RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
cryptodev_driver_id);
-RTE_INIT(dpaa_sec_init_log);
-static void
-dpaa_sec_init_log(void)
+RTE_INIT(dpaa_sec_init_log)
{
dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
if (dpaa_logtype_sec >= 0)
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index e15e373f..ac6c00a6 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -26,6 +26,7 @@
#define CTX_POOL_NUM_BUFS 32000
#define CTX_POOL_BUF_SIZE sizeof(struct dpaa_sec_op_ctx)
#define CTX_POOL_CACHE_SIZE 512
+#define RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS 2048
#define DIR_ENC 1
#define DIR_DEC 0
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_log.h b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
index 9784fcbf..fb895a8b 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_log.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
@@ -18,7 +18,7 @@ extern int dpaa_logtype_sec;
rte_log(RTE_LOG_DEBUG, dpaa_logtype_sec, "dpaa_sec: %s(): " \
fmt "\n", __func__, ##args)
-#define PMD_INIT_FUNC_TRACE() DPAA_SEC_LOG(DEBUG, " >>")
+#define PMD_INIT_FUNC_TRACE() DPAA_SEC_DEBUG(">>")
#define DPAA_SEC_INFO(fmt, args...) \
DPAA_SEC_LOG(INFO, fmt, ## args)
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 205dc1de..239a1cf4 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <rte_common.h>
@@ -79,18 +79,20 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
break;
case KASUMI_OP_NOT_SUPPORTED:
default:
- KASUMI_LOG_ERR("Unsupported operation chain order parameter");
+ KASUMI_LOG(ERR, "Unsupported operation chain order parameter");
return -ENOTSUP;
}
if (cipher_xform) {
/* Only KASUMI F8 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8)
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
+			KASUMI_LOG(ERR, "Unsupported cipher algorithm");
return -ENOTSUP;
+ }
sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
- KASUMI_LOG_ERR("Wrong IV length");
+ KASUMI_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
@@ -101,11 +103,13 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
if (auth_xform) {
/* Only KASUMI F9 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9)
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
+			KASUMI_LOG(ERR, "Unsupported authentication algorithm");
return -ENOTSUP;
+ }
if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
- KASUMI_LOG_ERR("Wrong digest length");
+ KASUMI_LOG(ERR, "Wrong digest length");
return -EINVAL;
}
@@ -131,7 +135,7 @@ kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(op->sym->session != NULL))
sess = (struct kasumi_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
} else {
@@ -153,8 +157,8 @@ kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -213,7 +217,7 @@ process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
if (op->sym->m_dst == NULL) {
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("bit-level in-place not supported\n");
+ KASUMI_LOG(ERR, "bit-level in-place not supported");
return 0;
}
dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
@@ -244,7 +248,7 @@ process_kasumi_hash_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("offset");
+			KASUMI_LOG(ERR, "Invalid offset");
break;
}
@@ -409,9 +413,9 @@ kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
(curr_c_op->sym->m_dst != NULL &&
!rte_pktmbuf_is_contiguous(
curr_c_op->sym->m_dst))) {
- KASUMI_LOG_ERR("PMD supports only contiguous mbufs, "
+ KASUMI_LOG(ERR, "PMD supports only contiguous mbufs, "
"op (%p) provides noncontiguous mbuf as "
- "source/destination buffer.\n", curr_c_op);
+ "source/destination buffer.", curr_c_op);
curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
break;
}
@@ -531,7 +535,7 @@ cryptodev_kasumi_create(const char *name,
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- KASUMI_LOG_ERR("failed to create cryptodev vdev");
+ KASUMI_LOG(ERR, "failed to create cryptodev vdev");
goto init_error;
}
@@ -555,11 +559,10 @@ cryptodev_kasumi_create(const char *name,
internals = dev->data->dev_private;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
- KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed",
+	KASUMI_LOG(ERR, "driver %s: create failed",
init_params->name);
cryptodev_kasumi_remove(vdev);
@@ -573,8 +576,7 @@ cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct kasumi_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -617,7 +619,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
cryptodev_kasumi_pmd_drv.driver, cryptodev_driver_id);
+
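+/* defined here; declared extern in the PMD private header */
+int kasumi_logtype_driver;
+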
+RTE_INIT(kasumi_init_log)
+{
+ kasumi_logtype_driver = rte_log_register("pmd.crypto.kasumi");
+}
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
index a388dbb6..9e4bf1b5 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <string.h>
@@ -126,7 +126,8 @@ kasumi_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+		/* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = kasumi_pmd_capabilities;
}
@@ -171,13 +172,13 @@ kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) == ring_size) {
- KASUMI_LOG_INFO("Reusing existing ring %s"
+ KASUMI_LOG(INFO, "Reusing existing ring %s"
" for processed packets",
qp->name);
return r;
}
- KASUMI_LOG_ERR("Unable to reuse existing ring %s"
+ KASUMI_LOG(ERR, "Unable to reuse existing ring %s"
" for processed packets",
qp->name);
return NULL;
@@ -228,22 +229,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-kasumi_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-kasumi_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
kasumi_pmd_qp_count(struct rte_cryptodev *dev)
@@ -253,14 +238,14 @@ kasumi_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the KASUMI session structure */
static unsigned
-kasumi_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+kasumi_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct kasumi_session);
}
/** Configure a KASUMI session from a crypto xform chain */
static int
-kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+kasumi_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -269,26 +254,26 @@ kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
int ret;
if (unlikely(sess == NULL)) {
- KASUMI_LOG_ERR("invalid session struct");
+ KASUMI_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ KASUMI_LOG(ERR,
+ "Couldn't get object from session mempool");
return -ENOMEM;
}
ret = kasumi_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- KASUMI_LOG_ERR("failed configure session parameters");
+ KASUMI_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -296,17 +281,17 @@ kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-kasumi_pmd_session_clear(struct rte_cryptodev *dev,
+kasumi_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct kasumi_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -324,13 +309,11 @@ struct rte_cryptodev_ops kasumi_pmd_ops = {
.queue_pair_setup = kasumi_pmd_qp_setup,
.queue_pair_release = kasumi_pmd_qp_release,
- .queue_pair_start = kasumi_pmd_qp_start,
- .queue_pair_stop = kasumi_pmd_qp_stop,
.queue_pair_count = kasumi_pmd_qp_count,
- .session_get_size = kasumi_pmd_session_get_size,
- .session_configure = kasumi_pmd_session_configure,
- .session_clear = kasumi_pmd_session_clear
+ .sym_session_get_size = kasumi_pmd_sym_session_get_size,
+ .sym_session_configure = kasumi_pmd_sym_session_configure,
+ .sym_session_clear = kasumi_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
index a397bee6..488777ca 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#ifndef _RTE_KASUMI_PMD_PRIVATE_H_
@@ -10,25 +10,13 @@
#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
/**< KASUMI PMD device name */
-#define KASUMI_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \
- __func__, __LINE__, ## args)
+/** KASUMI PMD LOGTYPE DRIVER */
+extern int kasumi_logtype_driver;
-#ifdef RTE_LIBRTE_KASUMI_DEBUG
-#define KASUMI_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \
- __func__, __LINE__, ## args)
-
-#define KASUMI_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define KASUMI_LOG_INFO(fmt, args...)
-#define KASUMI_LOG_DBG(fmt, args...)
-#endif
+#define KASUMI_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, kasumi_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
#define KASUMI_DIGEST_LENGTH 4
@@ -36,8 +24,6 @@
struct kasumi_private {
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
};
/** KASUMI buffer queue pair */
diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd.c b/drivers/crypto/mvsam/rte_mrvl_pmd.c
index 1b6029a5..73eff757 100644
--- a/drivers/crypto/mvsam/rte_mrvl_pmd.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd.c
@@ -16,8 +16,23 @@
#define MRVL_MUSDK_DMA_MEMSIZE 41943040
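+/* mvsam keeps its own session cap (max_nb_sessions), since SAM sessions occupy a finite engine resource */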
+#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
+#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048
+
static uint8_t cryptodev_driver_id;
+struct mrvl_pmd_init_params {
+ struct rte_cryptodev_pmd_init_params common;
+ uint32_t max_nb_sessions;
+};
+
+const char *mrvl_pmd_valid_params[] = {
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+ MRVL_PMD_MAX_NB_SESS_ARG
+};
+
/**
* Flag if particular crypto algorithm is supported by PMD/MUSDK.
*
@@ -432,7 +447,7 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
return -EINVAL;
}
- sess = (struct mrvl_crypto_session *)get_session_private_data(
+ sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
op->sym->session, cryptodev_driver_id);
if (unlikely(sess == NULL)) {
MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
@@ -691,14 +706,15 @@ mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
static int
cryptodev_mrvl_crypto_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_cryptodev_pmd_init_params *init_params)
+ struct mrvl_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct mrvl_crypto_private *internals;
struct sam_init_params sam_params;
int ret;
- dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device,
+ &init_params->common);
if (dev == NULL) {
MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
@@ -718,7 +734,7 @@ cryptodev_mrvl_crypto_create(const char *name,
/* Set vector instructions mode supported */
internals = dev->data->dev_private;
- internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
/*
@@ -740,12 +756,99 @@ cryptodev_mrvl_crypto_create(const char *name,
init_error:
MRVL_CRYPTO_LOG_ERR(
- "driver %s: %s failed", init_params->name, __func__);
+ "driver %s: %s failed", init_params->common.name, __func__);
cryptodev_mrvl_crypto_uninit(vdev);
return -EFAULT;
}
+/** Parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+		MRVL_CRYPTO_LOG_ERR("Argument must not be negative.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse name */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
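+/* parses vdev args, e.g. --vdev "crypto_mvsam,max_nb_queue_pairs=2,max_nb_sessions=1024" (illustrative values) */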
+static int
+mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ mrvl_pmd_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ /* Common VDEV parameters */
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ &parse_integer_arg,
+ &params->common.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+ &parse_integer_arg,
+ &params->common.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ &parse_name_arg,
+ &params->common);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ MRVL_PMD_MAX_NB_SESS_ARG,
+ &parse_integer_arg,
+				&params->max_nb_sessions);
+ if (ret < 0)
+ goto free_kvlist;
+
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
/**
* Initialize the crypto device.
*
@@ -755,7 +858,18 @@ init_error:
static int
cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
{
- struct rte_cryptodev_pmd_init_params init_params = { };
+ struct mrvl_pmd_init_params init_params = {
+ .common = {
+ .name = "",
+ .private_data_size =
+ sizeof(struct mrvl_crypto_private),
+ .max_nb_queue_pairs =
+ sam_get_num_inst() * SAM_HW_RING_NUM,
+ .socket_id = rte_socket_id()
+ },
+ .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
+ };
+
const char *name, *args;
int ret;
@@ -764,13 +878,7 @@ cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
return -EINVAL;
args = rte_vdev_device_args(vdev);
- init_params.private_data_size = sizeof(struct mrvl_crypto_private);
- init_params.max_nb_queue_pairs = sam_get_num_inst() * SAM_HW_RING_NUM;
- init_params.max_nb_sessions =
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
- init_params.socket_id = rte_socket_id();
-
- ret = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ ret = mrvl_pmd_parse_input_args(&init_params, args);
if (ret) {
RTE_LOG(ERR, PMD,
"Failed to parse initialisation arguments[%s]\n",
diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
index 3f8de37b..c045562c 100644
--- a/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
@@ -595,32 +595,6 @@ mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return -1;
}
-/** Start queue pair (PMD ops callback) - not supported.
- *
- * @param dev Pointer to the device structure.
- * @param qp_id ID of the Queue Pair.
- * @returns -ENOTSUP. Always.
- */
-static int
-mrvl_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair (PMD ops callback) - not supported.
- *
- * @param dev Pointer to the device structure.
- * @param qp_id ID of the Queue Pair.
- * @returns -ENOTSUP. Always.
- */
-static int
-mrvl_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs (PMD ops callback).
*
* @param dev Pointer to the device structure.
@@ -638,7 +612,7 @@ mrvl_crypto_pmd_qp_count(struct rte_cryptodev *dev)
* @returns Size of Marvell crypto session.
*/
static unsigned
-mrvl_crypto_pmd_session_get_size(__rte_unused struct rte_cryptodev *dev)
+mrvl_crypto_pmd_sym_session_get_size(__rte_unused struct rte_cryptodev *dev)
{
return sizeof(struct mrvl_crypto_session);
}
@@ -651,7 +625,7 @@ mrvl_crypto_pmd_session_get_size(__rte_unused struct rte_cryptodev *dev)
* @returns 0 upon success, negative value otherwise.
*/
static int
-mrvl_crypto_pmd_session_configure(__rte_unused struct rte_cryptodev *dev,
+mrvl_crypto_pmd_sym_session_configure(__rte_unused struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mp)
@@ -679,7 +653,7 @@ mrvl_crypto_pmd_session_configure(__rte_unused struct rte_cryptodev *dev,
return ret;
}
- set_session_private_data(sess, dev->driver_id, sess_private_data);
+ set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
mrvl_sess = (struct mrvl_crypto_session *)sess_private_data;
if (sam_session_create(&mrvl_sess->sam_sess_params,
@@ -698,12 +672,12 @@ mrvl_crypto_pmd_session_configure(__rte_unused struct rte_cryptodev *dev,
* @returns 0. Always.
*/
static void
-mrvl_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+mrvl_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
@@ -717,7 +691,7 @@ mrvl_crypto_pmd_session_clear(struct rte_cryptodev *dev,
memset(sess, 0, sizeof(struct mrvl_crypto_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -738,13 +712,11 @@ static struct rte_cryptodev_ops mrvl_crypto_pmd_ops = {
.queue_pair_setup = mrvl_crypto_pmd_qp_setup,
.queue_pair_release = mrvl_crypto_pmd_qp_release,
- .queue_pair_start = mrvl_crypto_pmd_qp_start,
- .queue_pair_stop = mrvl_crypto_pmd_qp_stop,
.queue_pair_count = mrvl_crypto_pmd_qp_count,
- .session_get_size = mrvl_crypto_pmd_session_get_size,
- .session_configure = mrvl_crypto_pmd_session_configure,
- .session_clear = mrvl_crypto_pmd_session_clear
+ .sym_session_get_size = mrvl_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = mrvl_crypto_pmd_sym_session_configure,
+ .sym_session_clear = mrvl_crypto_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops = &mrvl_crypto_pmd_ops;
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index 052b6546..6e29a21a 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -78,7 +78,7 @@ get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(sym_op->session != NULL))
sess = (struct null_crypto_session *)
- get_session_private_data(
+ get_sym_session_private_data(
sym_op->session, cryptodev_driver_id);
} else {
void *_sess = NULL;
@@ -99,8 +99,8 @@ get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(sym_op->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
return sess;
@@ -161,10 +161,9 @@ cryptodev_null_create(const char *name,
{
struct rte_cryptodev *dev;
struct null_crypto_private *internals;
-
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ NULL_LOG(ERR, "failed to create cryptodev vdev");
return -EFAULT;
}
@@ -177,12 +176,11 @@ cryptodev_null_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL;
internals = dev->data->dev_private;
internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
}
@@ -195,8 +193,7 @@ cryptodev_null_probe(struct rte_vdev_device *dev)
"",
sizeof(struct null_crypto_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name, *args;
int retval;
@@ -209,8 +206,9 @@ cryptodev_null_probe(struct rte_vdev_device *dev)
retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
if (retval) {
- RTE_LOG(ERR, PMD,
- "Failed to parse initialisation arguments[%s]\n", args);
+ NULL_LOG(ERR,
+ "Failed to parse initialisation arguments[%s]",
+ args);
return -EINVAL;
}
@@ -245,7 +243,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv.driver,
cryptodev_driver_id);
+
+RTE_INIT(null_init_log)
+{
+ null_logtype_driver = rte_log_register("pmd.crypto.null");
+}
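The RTE_INIT constructor above registers a dynamic logtype in place of the old compile-time log macros. A short sketch of how an application (or the PMD itself) can raise the level for just this driver; the EAL --log-level option can achieve the same from the command line:

    #include <rte_log.h>

    extern int null_logtype_driver; /* registered as "pmd.crypto.null" above */

    static void
    enable_null_pmd_debug(void)
    {
            /* after this, NULL_LOG(INFO, ...) and NULL_LOG(DEBUG, ...) emit */
            rte_log_set_level(null_logtype_driver, RTE_LOG_DEBUG);
    }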
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
index f8e5f61f..bb2b6e14 100644
--- a/drivers/crypto/null/null_crypto_pmd_ops.c
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -121,7 +121,8 @@ null_crypto_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = null_crypto_pmd_capabilities;
}
@@ -163,15 +164,15 @@ null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- NULL_CRYPTO_LOG_INFO(
- "Reusing existing ring %s for processed packets",
- qp->name);
+ NULL_LOG(INFO,
+ "Reusing existing ring %s for "
+ " processed packets", qp->name);
return r;
}
- NULL_CRYPTO_LOG_INFO(
- "Unable to reuse existing ring %s for processed packets",
- qp->name);
+ NULL_LOG(INFO,
+ "Unable to reuse existing ring %s for "
+ " processed packets", qp->name);
return NULL;
}
@@ -190,7 +191,7 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
int retval;
if (qp_id >= internals->max_nb_qpairs) {
- NULL_CRYPTO_LOG_ERR("Invalid qp_id %u, greater than maximum "
+ NULL_LOG(ERR, "Invalid qp_id %u, greater than maximum "
"number of queue pairs supported (%u).",
qp_id, internals->max_nb_qpairs);
return (-EINVAL);
@@ -204,7 +205,7 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
RTE_CACHE_LINE_SIZE, socket_id);
if (qp == NULL) {
- NULL_CRYPTO_LOG_ERR("Failed to allocate queue pair memory");
+ NULL_LOG(ERR, "Failed to allocate queue pair memory");
return (-ENOMEM);
}
@@ -213,15 +214,16 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
retval = null_crypto_pmd_qp_set_unique_name(dev, qp);
if (retval) {
- NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
+ NULL_LOG(ERR, "Failed to create unique name for null "
"crypto device");
+
goto qp_setup_cleanup;
}
qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp,
qp_conf->nb_descriptors, socket_id);
if (qp->processed_pkts == NULL) {
- NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
+ NULL_LOG(ERR, "Failed to create unique name for null "
"crypto device");
goto qp_setup_cleanup;
}
@@ -239,22 +241,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-null_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-null_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
null_crypto_pmd_qp_count(struct rte_cryptodev *dev)
@@ -264,14 +250,14 @@ null_crypto_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the NULL crypto session structure */
static unsigned
-null_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+null_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct null_crypto_session);
}
/** Configure a null crypto session from a crypto xform chain */
static int
-null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+null_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mp)
@@ -280,26 +266,26 @@ null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
int ret;
if (unlikely(sess == NULL)) {
- NULL_CRYPTO_LOG_ERR("invalid session struct");
+ NULL_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mp, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ NULL_LOG(ERR,
+ "Couldn't get object from session mempool");
return -ENOMEM;
}
ret = null_crypto_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- NULL_CRYPTO_LOG_ERR("failed configure session parameters");
+ NULL_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mp, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -307,17 +293,17 @@ null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-null_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+null_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct null_crypto_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -335,13 +321,11 @@ struct rte_cryptodev_ops pmd_ops = {
.queue_pair_setup = null_crypto_pmd_qp_setup,
.queue_pair_release = null_crypto_pmd_qp_release,
- .queue_pair_start = null_crypto_pmd_qp_start,
- .queue_pair_stop = null_crypto_pmd_qp_stop,
.queue_pair_count = null_crypto_pmd_qp_count,
- .session_get_size = null_crypto_pmd_session_get_size,
- .session_configure = null_crypto_pmd_session_configure,
- .session_clear = null_crypto_pmd_session_clear
+ .sym_session_get_size = null_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = null_crypto_pmd_sym_session_configure,
+ .sym_session_clear = null_crypto_pmd_sym_session_clear
};
struct rte_cryptodev_ops *null_crypto_pmd_ops = &pmd_ops;
diff --git a/drivers/crypto/null/null_crypto_pmd_private.h b/drivers/crypto/null/null_crypto_pmd_private.h
index 0fd13362..d5905afd 100644
--- a/drivers/crypto/null/null_crypto_pmd_private.h
+++ b/drivers/crypto/null/null_crypto_pmd_private.h
@@ -8,31 +8,17 @@
#define CRYPTODEV_NAME_NULL_PMD crypto_null
/**< Null crypto PMD device name */
-#define NULL_CRYPTO_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_NULL_PMD), \
- __func__, __LINE__, ## args)
+int null_logtype_driver;
-#ifdef RTE_LIBRTE_NULL_CRYPTO_DEBUG
-#define NULL_CRYPTO_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_NULL_PMD), \
- __func__, __LINE__, ## args)
-
-#define NULL_CRYPTO_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_NULL_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define NULL_CRYPTO_LOG_INFO(fmt, args...)
-#define NULL_CRYPTO_LOG_DBG(fmt, args...)
-#endif
+#define NULL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, null_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
/** private data structure for each NULL crypto device */
struct null_crypto_private {
unsigned max_nb_qpairs; /**< Max number of queue pairs */
- unsigned max_nb_sessions; /**< Max number of sessions */
};
/** NULL crypto queue pair */
diff --git a/drivers/crypto/openssl/compat.h b/drivers/crypto/openssl/compat.h
new file mode 100644
index 00000000..45f9a33d
--- /dev/null
+++ b/drivers/crypto/openssl/compat.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef __RTA_COMPAT_H__
+#define __RTA_COMPAT_H__
+
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+
+#define set_rsa_params(rsa, p, q, ret) \
+ do { rsa->p = p; rsa->q = q; ret = 0; } while (0)
+
+#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
+ do { \
+ rsa->dmp1 = dmp1; \
+ rsa->dmq1 = dmq1; \
+ rsa->iqmp = iqmp; \
+ ret = 0; \
+ } while (0)
+
+#define set_rsa_keys(rsa, n, e, d, ret) \
+ do { \
+ rsa->n = n; rsa->e = e; rsa->d = d; ret = 0; \
+ } while (0)
+
+#define set_dh_params(dh, p, g, ret) \
+ do { \
+ dh->p = p; \
+ dh->q = NULL; \
+ dh->g = g; \
+ ret = 0; \
+ } while (0)
+
+#define set_dh_priv_key(dh, priv_key, ret) \
+ do { dh->priv_key = priv_key; ret = 0; } while (0)
+
+#define set_dsa_params(dsa, p, q, g, ret) \
+ do { dsa->p = p; dsa->q = q; dsa->g = g; ret = 0; } while (0)
+
+#define get_dh_pub_key(dh, pub_key) \
+ (pub_key = dh->pub_key)
+
+#define get_dh_priv_key(dh, priv_key) \
+ (priv_key = dh->priv_key)
+
+#define set_dsa_sign(sign, r, s) \
+ do { sign->r = r; sign->s = s; } while (0)
+
+#define get_dsa_sign(sign, r, s) \
+ do { r = sign->r; s = sign->s; } while (0)
+
+#define set_dsa_keys(dsa, pub, priv, ret) \
+ do { dsa->pub_key = pub; dsa->priv_key = priv; ret = 0; } while (0)
+
+#define set_dsa_pub_key(dsa, pub_key) \
+ (dsa->pub_key = pub_key)
+
+#define get_dsa_priv_key(dsa, priv_key) \
+ (priv_key = dsa->priv_key)
+
+#else
+
+#define set_rsa_params(rsa, p, q, ret) \
+ (ret = !RSA_set0_factors(rsa, p, q))
+
+#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
+ (ret = !RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp))
+
+/* n, e must be non-null, d can be NULL */
+#define set_rsa_keys(rsa, n, e, d, ret) \
+ (ret = !RSA_set0_key(rsa, n, e, d))
+
+#define set_dh_params(dh, p, g, ret) \
+ (ret = !DH_set0_pqg(dh, p, NULL, g))
+
+#define set_dh_priv_key(dh, priv_key, ret) \
+ (ret = !DH_set0_key(dh, NULL, priv_key))
+
+#define get_dh_pub_key(dh, pub_key) \
+ (DH_get0_key(dh, &pub_key, NULL))
+
+#define get_dh_priv_key(dh, priv_key) \
+ (DH_get0_key(dh, NULL, &priv_key))
+
+#define set_dsa_params(dsa, p, q, g, ret) \
+ (ret = !DSA_set0_pqg(dsa, p, q, g))
+
+#define set_dsa_priv_key(dsa, priv_key) \
+ (DSA_set0_key(dsa, NULL, priv_key))
+
+#define set_dsa_sign(sign, r, s) \
+ (DSA_SIG_set0(sign, r, s))
+
+#define get_dsa_sign(sign, r, s) \
+ (DSA_SIG_get0(sign, &r, &s))
+
+#define set_dsa_keys(dsa, pub, priv, ret) \
+ (ret = !DSA_set0_key(dsa, pub, priv))
+
+#define set_dsa_pub_key(dsa, pub_key) \
+ (DSA_set0_key(dsa, pub_key, NULL))
+
+#define get_dsa_priv_key(dsa, priv_key) \
+ (DSA_get0_key(dsa, NULL, &priv_key))
+
+#endif /* version < 10100000 */
+
+#endif /* __RTA_COMPAT_H__ */
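A hedged usage sketch for the compat macros above: loading an RSA public key the same way on pre- and post-1.1.0 OpenSSL. The buffers are placeholders and error handling is minimal:

    #include <openssl/bn.h>
    #include <openssl/rsa.h>
    #include "compat.h"

    static RSA *
    load_rsa_pub_key(const unsigned char *n_buf, int n_len,
                    const unsigned char *e_buf, int e_len)
    {
            int ret = 0;
            RSA *rsa = RSA_new();
            BIGNUM *n = BN_bin2bn(n_buf, n_len, NULL);
            BIGNUM *e = BN_bin2bn(e_buf, e_len, NULL);

            if (rsa == NULL || n == NULL || e == NULL)
                    goto err;
            set_rsa_keys(rsa, n, e, NULL, ret); /* d may be NULL: public key */
            if (ret)
                    goto err;
            return rsa; /* rsa now owns n and e */
    err:
            RSA_free(rsa);
            BN_free(n);
            BN_free(e);
            return NULL;
    }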
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 93c6d7e5..7d263aba 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -14,6 +14,7 @@
#include <openssl/evp.h>
#include "rte_openssl_pmd_private.h"
+#include "compat.h"
#define DES_BLOCK_SIZE 8
@@ -119,7 +120,7 @@ get_cipher_key_ede(uint8_t *key, int keylen, uint8_t *key_ede)
memcpy(key_ede + 16, key, 8);
break;
default:
- OPENSSL_LOG_ERR("Unsupported key size");
+ OPENSSL_LOG(ERR, "Unsupported key size");
res = -EINVAL;
}
@@ -137,6 +138,9 @@ get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen,
switch (sess_algo) {
case RTE_CRYPTO_CIPHER_3DES_CBC:
switch (keylen) {
+ case 8:
+ *algo = EVP_des_cbc();
+ break;
case 16:
*algo = EVP_des_ede_cbc();
break;
@@ -677,7 +681,7 @@ openssl_set_session_parameters(struct openssl_session *sess,
ret = openssl_set_session_cipher_parameters(
sess, cipher_xform);
if (ret != 0) {
- OPENSSL_LOG_ERR(
+ OPENSSL_LOG(ERR,
"Invalid/unsupported cipher parameters");
return ret;
}
@@ -686,7 +690,7 @@ openssl_set_session_parameters(struct openssl_session *sess,
if (auth_xform) {
ret = openssl_set_session_auth_parameters(sess, auth_xform);
if (ret != 0) {
- OPENSSL_LOG_ERR(
+ OPENSSL_LOG(ERR,
"Invalid/unsupported auth parameters");
return ret;
}
@@ -695,7 +699,7 @@ openssl_set_session_parameters(struct openssl_session *sess,
if (aead_xform) {
ret = openssl_set_session_aead_parameters(sess, aead_xform);
if (ret != 0) {
- OPENSSL_LOG_ERR(
+ OPENSSL_LOG(ERR,
"Invalid/unsupported AEAD parameters");
return ret;
}
@@ -727,19 +731,36 @@ openssl_reset_session(struct openssl_session *sess)
}
/** Provide session for operation */
-static struct openssl_session *
+static void *
get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
{
struct openssl_session *sess = NULL;
+ struct openssl_asym_session *asym_sess = NULL;
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
- /* get existing session */
- if (likely(op->sym->session != NULL))
- sess = (struct openssl_session *)
- get_session_private_data(
- op->sym->session,
- cryptodev_driver_id);
+ if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ /* get existing session */
+ if (likely(op->sym->session != NULL))
+ sess = (struct openssl_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ if (likely(op->asym->session != NULL))
+ asym_sess = (struct openssl_asym_session *)
+ get_asym_session_private_data(
+ op->asym->session,
+ cryptodev_driver_id);
+ if (asym_sess == NULL)
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return asym_sess;
+ }
} else {
+ /* sessionless asymmetric not supported */
+ if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
+ return NULL;
+
/* provide internal session */
void *_sess = NULL;
void *_sess_private_data = NULL;
@@ -759,8 +780,8 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (sess == NULL)
@@ -884,7 +905,7 @@ process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
return 0;
process_cipher_encrypt_err:
- OPENSSL_LOG_ERR("Process openssl cipher encrypt failed");
+ OPENSSL_LOG(ERR, "Process openssl cipher encrypt failed");
return -EINVAL;
}
@@ -908,7 +929,7 @@ process_openssl_cipher_bpi_encrypt(uint8_t *src, uint8_t *dst,
return 0;
process_cipher_encrypt_err:
- OPENSSL_LOG_ERR("Process openssl cipher bpi encrypt failed");
+ OPENSSL_LOG(ERR, "Process openssl cipher bpi encrypt failed");
return -EINVAL;
}
/** Process standard openssl cipher decryption */
@@ -932,7 +953,7 @@ process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
return 0;
process_cipher_decrypt_err:
- OPENSSL_LOG_ERR("Process openssl cipher decrypt failed");
+ OPENSSL_LOG(ERR, "Process openssl cipher decrypt failed");
return -EINVAL;
}
@@ -989,7 +1010,7 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst,
return 0;
process_cipher_des3ctr_err:
- OPENSSL_LOG_ERR("Process openssl cipher des 3 ede ctr failed");
+ OPENSSL_LOG(ERR, "Process openssl cipher des 3 ede ctr failed");
return -EINVAL;
}
@@ -1027,7 +1048,7 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset,
return 0;
process_auth_encryption_gcm_err:
- OPENSSL_LOG_ERR("Process openssl auth encryption gcm failed");
+ OPENSSL_LOG(ERR, "Process openssl auth encryption gcm failed");
return -EINVAL;
}
@@ -1068,7 +1089,7 @@ process_openssl_auth_encryption_ccm(struct rte_mbuf *mbuf_src, int offset,
return 0;
process_auth_encryption_ccm_err:
- OPENSSL_LOG_ERR("Process openssl auth encryption ccm failed");
+ OPENSSL_LOG(ERR, "Process openssl auth encryption ccm failed");
return -EINVAL;
}
@@ -1106,7 +1127,7 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset,
return 0;
process_auth_decryption_gcm_err:
- OPENSSL_LOG_ERR("Process openssl auth decryption gcm failed");
+ OPENSSL_LOG(ERR, "Process openssl auth decryption gcm failed");
return -EINVAL;
}
@@ -1145,7 +1166,7 @@ process_openssl_auth_decryption_ccm(struct rte_mbuf *mbuf_src, int offset,
return 0;
process_auth_decryption_ccm_err:
- OPENSSL_LOG_ERR("Process openssl auth decryption ccm failed");
+ OPENSSL_LOG(ERR, "Process openssl auth decryption ccm failed");
return -EINVAL;
}
@@ -1198,7 +1219,7 @@ process_auth_final:
return 0;
process_auth_err:
- OPENSSL_LOG_ERR("Process openssl auth failed");
+ OPENSSL_LOG(ERR, "Process openssl auth failed");
return -EINVAL;
}
@@ -1251,7 +1272,7 @@ process_auth_final:
return 0;
process_auth_err:
- OPENSSL_LOG_ERR("Process openssl auth failed");
+ OPENSSL_LOG(ERR, "Process openssl auth failed");
return -EINVAL;
}
@@ -1525,6 +1546,433 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
+/* process dsa sign operation */
+static int
+process_openssl_dsa_sign_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dsa_op_param *op = &cop->asym->dsa;
+ DSA *dsa = sess->u.s.dsa;
+ DSA_SIG *sign = NULL;
+
+ sign = DSA_do_sign(op->message.data,
+ op->message.length,
+ dsa);
+
+ if (sign == NULL) {
+ OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__);
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ } else {
+ const BIGNUM *r = NULL, *s = NULL;
+ get_dsa_sign(sign, r, s);
+
+ op->r.length = BN_bn2bin(r, op->r.data);
+ op->s.length = BN_bn2bin(s, op->s.data);
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+
+ DSA_SIG_free(sign);
+
+ return 0;
+}
+
+/* process dsa verify operation */
+static int
+process_openssl_dsa_verify_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dsa_op_param *op = &cop->asym->dsa;
+ DSA *dsa = sess->u.s.dsa;
+ int ret;
+ DSA_SIG *sign = DSA_SIG_new();
+ BIGNUM *r = NULL, *s = NULL;
+ BIGNUM *pub_key = NULL;
+
+ if (sign == NULL) {
+ OPENSSL_LOG(ERR, " %s:%d\n", __func__, __LINE__);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
+ r = BN_bin2bn(op->r.data,
+ op->r.length,
+ r);
+ s = BN_bin2bn(op->s.data,
+ op->s.length,
+ s);
+ pub_key = BN_bin2bn(op->y.data,
+ op->y.length,
+ pub_key);
+ if (!r || !s || !pub_key) {
+ if (r)
+ BN_free(r);
+ if (s)
+ BN_free(s);
+ if (pub_key)
+ BN_free(pub_key);
+
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dsa_sign(sign, r, s);
+ set_dsa_pub_key(dsa, pub_key);
+
+ ret = DSA_do_verify(op->message.data,
+ op->message.length,
+ sign,
+ dsa);
+
+ if (ret != 1)
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ DSA_SIG_free(sign);
+
+ return 0;
+}
+
+/* process dh operation */
+static int
+process_openssl_dh_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dh_op_param *op = &cop->asym->dh;
+ DH *dh_key = sess->u.dh.dh_key;
+ BIGNUM *priv_key = NULL;
+ int ret = 0;
+
+ if (sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE)) {
+ /* compute shared secret using peer public key
+ * and current private key
+ * shared secret = peer_key ^ priv_key mod p
+ */
+ BIGNUM *peer_key = NULL;
+
+ /* copy private key and peer key and compute shared secret */
+ peer_key = BN_bin2bn(op->pub_key.data,
+ op->pub_key.length,
+ peer_key);
+ if (peer_key == NULL) {
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ priv_key = BN_bin2bn(op->priv_key.data,
+ op->priv_key.length,
+ priv_key);
+ if (priv_key == NULL) {
+ BN_free(peer_key);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dh_priv_key(dh_key, priv_key, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to set private key\n");
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(peer_key);
+ BN_free(priv_key);
+ return 0;
+ }
+
+ ret = DH_compute_key(
+ op->shared_secret.data,
+ peer_key, dh_key);
+ if (ret < 0) {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(peer_key);
+ /* the private key is already loaded into dh,
+ * so do not free it directly here;
+ * DH_free() will free it later.
+ */
+ return 0;
+ }
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->shared_secret.length = ret;
+ BN_free(peer_key);
+ return 0;
+ }
+
+ /*
+ * other options are public and private key generations.
+ *
+ * if user provides private key,
+ * then first set DH with user provided private key
+ */
+ if ((sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) &&
+ !(sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE))) {
+ /* generate public key using user-provided private key
+ * pub_key = g ^ priv_key mod p
+ */
+
+ /* load private key into DH */
+ priv_key = BN_bin2bn(op->priv_key.data,
+ op->priv_key.length,
+ priv_key);
+ if (priv_key == NULL) {
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dh_priv_key(dh_key, priv_key, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to set private key\n");
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(priv_key);
+ return 0;
+ }
+ }
+
+ /* generate the public and private key pair.
+ *
+ * if the private key is already set, only the public key
+ * is generated.
+ *
+ * if the private key is not set, it is set to a random value
+ * and the internal private key is updated.
+ */
+ if (!DH_generate_key(dh_key)) {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return 0;
+ }
+
+ if (sess->u.dh.key_op & (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) {
+ const BIGNUM *pub_key = NULL;
+
+ OPENSSL_LOG(DEBUG, "%s:%d update public key\n",
+ __func__, __LINE__);
+
+ /* get the generated keys */
+ get_dh_pub_key(dh_key, pub_key);
+
+ /* output public key */
+ op->pub_key.length = BN_bn2bin(pub_key,
+ op->pub_key.data);
+ }
+
+ if (sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE)) {
+ const BIGNUM *priv_key = NULL;
+
+ OPENSSL_LOG(DEBUG, "%s:%d updated priv key\n",
+ __func__, __LINE__);
+
+ /* get the generated keys */
+ get_dh_priv_key(dh_key, priv_key);
+
+ /* provide generated private key back to user */
+ op->priv_key.length = BN_bn2bin(priv_key,
+ op->priv_key.data);
+ }
+
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ return 0;
+}
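For reference, a sketch of how an application would fill the DH op that reaches the shared-secret branch above. Field names follow rte_crypto_asym.h of this release; buffers and lengths are placeholders:

    #include <rte_crypto.h>

    static void
    fill_dh_shared_secret_op(struct rte_crypto_op *op,
                    uint8_t *peer_pub, size_t peer_len,
                    uint8_t *priv, size_t priv_len,
                    uint8_t *out, size_t out_len)
    {
            struct rte_crypto_dh_op_param *dh = &op->asym->dh;

            dh->pub_key.data = peer_pub;        /* peer's public key */
            dh->pub_key.length = peer_len;
            dh->priv_key.data = priv;           /* our private key */
            dh->priv_key.length = priv_len;
            dh->shared_secret.data = out;       /* DH_compute_key() writes here */
            dh->shared_secret.length = out_len;
    }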
+
+/* process modinv operation */
+static int
+process_openssl_modinv_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_asym_op *op = cop->asym;
+ BIGNUM *base = BN_CTX_get(sess->u.m.ctx);
+ BIGNUM *res = BN_CTX_get(sess->u.m.ctx);
+
+ if (unlikely(base == NULL || res == NULL)) {
+ if (base)
+ BN_free(base);
+ if (res)
+ BN_free(res);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
+ base = BN_bin2bn((const unsigned char *)op->modinv.base.data,
+ op->modinv.base.length, base);
+
+ if (BN_mod_inverse(res, base, sess->u.m.modulus, sess->u.m.ctx)) {
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data);
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ return 0;
+}
+
+/* process modexp operation */
+static int
+process_openssl_modexp_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_asym_op *op = cop->asym;
+ BIGNUM *base = BN_CTX_get(sess->u.e.ctx);
+ BIGNUM *res = BN_CTX_get(sess->u.e.ctx);
+
+ if (unlikely(base == NULL || res == NULL)) {
+ if (base)
+ BN_free(base);
+ if (res)
+ BN_free(res);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
+ base = BN_bin2bn((const unsigned char *)op->modex.base.data,
+ op->modex.base.length, base);
+
+ if (BN_mod_exp(res, base, sess->u.e.exp,
+ sess->u.e.mod, sess->u.e.ctx)) {
+ op->modex.base.length = BN_bn2bin(res, op->modex.base.data);
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ return 0;
+}
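The heavy lifting above is done by BN_mod_exp(). As a self-contained illustration of what it computes, here is square-and-multiply over native integers (assumes mod < 2^32 so the products cannot overflow): for example, 4^13 mod 497 = 445.

    #include <stdint.h>

    static uint64_t
    modexp_u64(uint64_t base, uint64_t exp, uint64_t mod)
    {
            uint64_t res = 1;

            base %= mod;
            while (exp) { /* square-and-multiply */
                    if (exp & 1)
                            res = res * base % mod;
                    base = base * base % mod;
                    exp >>= 1;
            }
            return res; /* modexp_u64(4, 13, 497) == 445 */
    }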
+
+/* process rsa operations */
+static int
+process_openssl_rsa_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ int ret = 0;
+ struct rte_crypto_asym_op *op = cop->asym;
+ RSA *rsa = sess->u.r.rsa;
+ uint32_t pad = (op->rsa.pad);
+
+ switch (pad) {
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT0:
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT1:
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT2:
+ pad = RSA_PKCS1_PADDING;
+ break;
+ case RTE_CRYPTO_RSA_PADDING_NONE:
+ pad = RSA_NO_PADDING;
+ break;
+ default:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ OPENSSL_LOG(ERR,
+ "rsa pad type not supported %d\n", pad);
+ return 0;
+ }
+
+ switch (op->rsa.op_type) {
+ case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+ ret = RSA_public_encrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.message.data,
+ rsa,
+ pad);
+
+ if (ret > 0)
+ op->rsa.message.length = ret;
+ OPENSSL_LOG(DEBUG,
+ "length of encrypted text %d\n", ret);
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_DECRYPT:
+ ret = RSA_private_decrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.message.data,
+ rsa,
+ pad);
+ if (ret > 0)
+ op->rsa.message.length = ret;
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_SIGN:
+ ret = RSA_private_encrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.sign.data,
+ rsa,
+ pad);
+ if (ret > 0)
+ op->rsa.sign.length = ret;
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_VERIFY:
+ ret = RSA_public_decrypt(op->rsa.sign.length,
+ op->rsa.sign.data,
+ op->rsa.sign.data,
+ rsa,
+ pad);
+
+ OPENSSL_LOG(DEBUG,
+ "Length of public_decrypt %d "
+ "length of message %zd\n",
+ ret, op->rsa.message.length);
+
+ if (memcmp(op->rsa.sign.data, op->rsa.message.data,
+ op->rsa.message.length)) {
+ OPENSSL_LOG(ERR,
+ "RSA sign Verification failed");
+ return -1;
+ }
+ break;
+
+ default:
+ /* allow ops with invalid args to be pushed to
+ * completion queue
+ */
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+
+ if (ret < 0)
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ return 0;
+}
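A hedged sketch of the op an application would submit to hit the SIGN branch above (PKCS#1 v1.5 padding); names follow rte_crypto_asym.h, buffers are placeholders:

    #include <rte_crypto.h>

    static void
    fill_rsa_sign_op(struct rte_crypto_op *op,
                    uint8_t *msg, size_t msg_len,
                    uint8_t *sig, size_t sig_len)
    {
            struct rte_crypto_rsa_op_param *rsa = &op->asym->rsa;

            rsa->op_type = RTE_CRYPTO_ASYM_OP_SIGN;
            rsa->pad = RTE_CRYPTO_RSA_PKCS1_V1_5_BT1;
            rsa->message.data = msg;
            rsa->message.length = msg_len;
            rsa->sign.data = sig;   /* RSA_private_encrypt() output lands here */
            rsa->sign.length = sig_len;
    }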
+
+static int
+process_asym_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_asym_session *sess)
+{
+ int retval = 0;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ retval = process_openssl_rsa_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ retval = process_openssl_modexp_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ retval = process_openssl_modinv_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ retval = process_openssl_dh_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ if (op->asym->dsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN)
+ retval = process_openssl_dsa_sign_op(op, sess);
+ else if (op->asym->dsa.op_type ==
+ RTE_CRYPTO_ASYM_OP_VERIFY)
+ retval =
+ process_openssl_dsa_verify_op(op, sess);
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+ if (!retval) {
+ /* op processed so push to completion queue as processed */
+ retval = rte_ring_enqueue(qp->processed_ops, (void *)op);
+ if (retval)
+ /* return error if failed to put in completion queue */
+ retval = -1;
+ }
+
+ return retval;
+}
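process_asym_op() pushes completed ops onto qp->processed_ops, which the dequeue path drains. A minimal application-side loop, with dev_id/qp_id illustrative:

    #include <rte_cryptodev.h>

    static void
    run_one_asym_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op)
    {
            struct rte_crypto_op *done = NULL;

            while (rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1) == 0)
                    ; /* retry until the op is accepted */
            while (rte_cryptodev_dequeue_burst(dev_id, qp_id, &done, 1) == 0)
                    ; /* poll for completion */
            /* done->status == RTE_CRYPTO_OP_STATUS_SUCCESS on success */
    }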
+
/** Process crypto operation for mbuf */
static int
process_op(struct openssl_qp *qp, struct rte_crypto_op *op,
@@ -1597,7 +2045,7 @@ static uint16_t
openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct openssl_session *sess;
+ void *sess;
struct openssl_qp *qp = queue_pair;
int i, retval;
@@ -1606,7 +2054,12 @@ openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
if (unlikely(sess == NULL))
goto enqueue_err;
- retval = process_op(qp, ops[i], sess);
+ if (ops[i]->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ retval = process_op(qp, ops[i],
+ (struct openssl_session *) sess);
+ else
+ retval = process_asym_op(qp, ops[i],
+ (struct openssl_asym_session *) sess);
if (unlikely(retval < 0))
goto enqueue_err;
}
@@ -1646,7 +2099,7 @@ cryptodev_openssl_create(const char *name,
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- OPENSSL_LOG_ERR("failed to create cryptodev vdev");
+ OPENSSL_LOG(ERR, "failed to create cryptodev vdev");
goto init_error;
}
@@ -1660,18 +2113,19 @@ cryptodev_openssl_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_CPU_AESNI |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
/* Set vector instructions mode supported */
internals = dev->data->dev_private;
internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
- OPENSSL_LOG_ERR("driver %s: cryptodev_openssl_create failed",
+ OPENSSL_LOG(ERR, "driver %s: create failed",
init_params->name);
cryptodev_openssl_remove(vdev);
@@ -1686,8 +2140,7 @@ cryptodev_openssl_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct openssl_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -1731,7 +2184,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD,
cryptodev_openssl_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv,
cryptodev_openssl_pmd_drv.driver, cryptodev_driver_id);
+
+RTE_INIT(openssl_init_log)
+{
+ openssl_logtype_driver = rte_log_register("pmd.crypto.openssl");
+}
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 1cb87d59..de228439 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -9,6 +9,7 @@
#include <rte_cryptodev_pmd.h>
#include "rte_openssl_pmd_private.h"
+#include "compat.h"
static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
@@ -397,7 +398,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
.block_size = 8,
.key_size = {
- .min = 16,
+ .min = 8,
.max = 24,
.increment = 8
},
@@ -469,6 +470,105 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
}, }
}, }
},
+ { /* RSA */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
+ .op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
+ (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
+ (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
+ {
+ .modlen = {
+ /* min length is based on openssl rsa keygen */
+ .min = 30,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* modexp */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+ .op_types = 0,
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* modinv */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODINV,
+ .op_types = 0,
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* dh */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DH,
+ .op_types =
+ ((1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE) |
+ (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE) |
+ (1 << RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE)),
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* dsa */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DSA,
+ .op_types =
+ ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY)),
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
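A hedged sketch of how an application can discover the asymmetric capabilities published above; the lookup helpers are assumed from the asymmetric cryptodev API introduced alongside this code:

    #include <rte_cryptodev.h>

    static int
    dev_supports_rsa_sign(uint8_t dev_id)
    {
            const struct rte_cryptodev_asymmetric_xform_capability *cap;
            struct rte_cryptodev_asym_capability_idx idx = {
                    .type = RTE_CRYPTO_ASYM_XFORM_RSA,
            };

            cap = rte_cryptodev_asym_capability_get(dev_id, &idx);
            if (cap == NULL)
                    return 0;
            return rte_cryptodev_asym_xform_capability_check_optype(cap,
                            RTE_CRYPTO_ASYM_OP_SIGN);
    }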
@@ -547,7 +647,8 @@ openssl_pmd_info_get(struct rte_cryptodev *dev,
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = openssl_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
}
}
@@ -588,14 +689,14 @@ openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- OPENSSL_LOG_INFO(
- "Reusing existing ring %s for processed ops",
+ OPENSSL_LOG(INFO,
+ "Reusing existing ring %s for processed ops",
qp->name);
return r;
}
- OPENSSL_LOG_ERR(
- "Unable to reuse existing ring %s for processed ops",
+ OPENSSL_LOG(ERR,
+ "Unable to reuse existing ring %s for processed ops",
qp->name);
return NULL;
}
@@ -647,22 +748,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-openssl_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-openssl_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
openssl_pmd_qp_count(struct rte_cryptodev *dev)
@@ -670,16 +755,23 @@ openssl_pmd_qp_count(struct rte_cryptodev *dev)
return dev->data->nb_queue_pairs;
}
-/** Returns the size of the session structure */
+/** Returns the size of the symmetric session structure */
static unsigned
-openssl_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct openssl_session);
}
+/** Returns the size of the asymmetric session structure */
+static unsigned
+openssl_pmd_asym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct openssl_asym_session);
+}
+
/** Configure the session from a crypto xform chain */
static int
-openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -688,46 +780,460 @@ openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
int ret;
if (unlikely(sess == NULL)) {
- OPENSSL_LOG_ERR("invalid session struct");
+ OPENSSL_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
+ OPENSSL_LOG(ERR,
"Couldn't get object from session mempool");
return -ENOMEM;
}
ret = openssl_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- OPENSSL_LOG_ERR("failed configure session parameters");
+ OPENSSL_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
}
+static int openssl_set_asym_session_parameters(
+ struct openssl_asym_session *asym_session,
+ struct rte_crypto_asym_xform *xform)
+{
+ int ret = 0;
+
+ if ((xform->xform_type != RTE_CRYPTO_ASYM_XFORM_DH) &&
+ (xform->next != NULL)) {
+ OPENSSL_LOG(ERR, "chained xfrms are not supported on %s",
+ rte_crypto_asym_xform_strings[xform->xform_type]);
+ return -1;
+ }
+
+ switch (xform->xform_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ {
+ BIGNUM *n = NULL;
+ BIGNUM *e = NULL;
+ BIGNUM *d = NULL;
+ BIGNUM *p = NULL, *q = NULL, *dmp1 = NULL;
+ BIGNUM *iqmp = NULL, *dmq1 = NULL;
+
+ /* copy xfrm data into rsa struct */
+ n = BN_bin2bn((const unsigned char *)xform->rsa.n.data,
+ xform->rsa.n.length, n);
+ e = BN_bin2bn((const unsigned char *)xform->rsa.e.data,
+ xform->rsa.e.length, e);
+
+ if (!n || !e)
+ goto err_rsa;
+
+ RSA *rsa = RSA_new();
+ if (rsa == NULL)
+ goto err_rsa;
+
+ if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_EXP) {
+ d = BN_bin2bn(
+ (const unsigned char *)xform->rsa.d.data,
+ xform->rsa.d.length,
+ d);
+ if (!d) {
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ } else {
+ p = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.p.data,
+ xform->rsa.qt.p.length,
+ p);
+ q = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.q.data,
+ xform->rsa.qt.q.length,
+ q);
+ dmp1 = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.dP.data,
+ xform->rsa.qt.dP.length,
+ dmp1);
+ dmq1 = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.dQ.data,
+ xform->rsa.qt.dQ.length,
+ dmq1);
+ iqmp = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.qInv.data,
+ xform->rsa.qt.qInv.length,
+ iqmp);
+
+ if (!p || !q || !dmp1 || !dmq1 || !iqmp) {
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ set_rsa_params(rsa, p, q, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR,
+ "failed to set rsa params\n");
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR,
+ "failed to set crt params\n");
+ RSA_free(rsa);
+ /*
+ * set the already-populated params to NULL,
+ * as they are freed by the call to RSA_free
+ */
+ p = q = NULL;
+ goto err_rsa;
+ }
+ }
+
+ set_rsa_keys(rsa, n, e, d, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to load rsa keys\n");
+ RSA_free(rsa);
+ return -1;
+ }
+ asym_session->u.r.rsa = rsa;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_RSA;
+ break;
+err_rsa:
+ if (n)
+ BN_free(n);
+ if (e)
+ BN_free(e);
+ if (d)
+ BN_free(d);
+ if (p)
+ BN_free(p);
+ if (q)
+ BN_free(q);
+ if (dmp1)
+ BN_free(dmp1);
+ if (dmq1)
+ BN_free(dmq1);
+ if (iqmp)
+ BN_free(iqmp);
+
+ return -1;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ {
+ struct rte_crypto_modex_xform *xfrm = &(xform->modex);
+
+ BN_CTX *ctx = BN_CTX_new();
+ if (ctx == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ return -1;
+ }
+ BN_CTX_start(ctx);
+ BIGNUM *mod = BN_CTX_get(ctx);
+ BIGNUM *exp = BN_CTX_get(ctx);
+ if (mod == NULL || exp == NULL) {
+ BN_CTX_end(ctx);
+ BN_CTX_free(ctx);
+ return -1;
+ }
+
+ mod = BN_bin2bn((const unsigned char *)
+ xfrm->modulus.data,
+ xfrm->modulus.length, mod);
+ exp = BN_bin2bn((const unsigned char *)
+ xfrm->exponent.data,
+ xfrm->exponent.length, exp);
+ asym_session->u.e.ctx = ctx;
+ asym_session->u.e.mod = mod;
+ asym_session->u.e.exp = exp;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
+ break;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ {
+ struct rte_crypto_modinv_xform *xfrm = &(xform->modinv);
+
+ BN_CTX *ctx = BN_CTX_new();
+ if (ctx == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ return -1;
+ }
+ BN_CTX_start(ctx);
+ BIGNUM *mod = BN_CTX_get(ctx);
+ if (mod == NULL) {
+ BN_CTX_end(ctx);
+ BN_CTX_free(ctx);
+ return -1;
+ }
+
+ mod = BN_bin2bn((const unsigned char *)
+ xfrm->modulus.data,
+ xfrm->modulus.length,
+ mod);
+ asym_session->u.m.ctx = ctx;
+ asym_session->u.m.modulus = mod;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODINV;
+ break;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ {
+ BIGNUM *p = NULL;
+ BIGNUM *g = NULL;
+
+ p = BN_bin2bn((const unsigned char *)
+ xform->dh.p.data,
+ xform->dh.p.length,
+ p);
+ g = BN_bin2bn((const unsigned char *)
+ xform->dh.g.data,
+ xform->dh.g.length,
+ g);
+ if (!p || !g)
+ goto err_dh;
+
+ DH *dh = DH_new();
+ if (dh == NULL) {
+ OPENSSL_LOG(ERR,
+ "failed to allocate resources\n");
+ goto err_dh;
+ }
+ set_dh_params(dh, p, g, ret);
+ if (ret) {
+ DH_free(dh);
+ goto err_dh;
+ }
+
+ /*
+ * set up the xform for
+ * public key generation, or
+ * DH private key generation, or both
+ * public and private key generation
+ */
+ asym_session->u.dh.key_op = (1 << xform->dh.type);
+
+ if (xform->dh.type ==
+ RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE) {
+ /* check if next is pubkey */
+ if ((xform->next != NULL) &&
+ (xform->next->xform_type ==
+ RTE_CRYPTO_ASYM_XFORM_DH) &&
+ (xform->next->dh.type ==
+ RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)
+ ) {
+ /*
+ * set up op as pub/priv key
+ * pair generation
+ */
+ asym_session->u.dh.key_op |=
+ (1 <<
+ RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE);
+ }
+ }
+ asym_session->u.dh.dh_key = dh;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DH;
+ break;
+
+err_dh:
+ OPENSSL_LOG(ERR, " failed to set dh params\n");
+ if (p)
+ BN_free(p);
+ if (g)
+ BN_free(g);
+ return -1;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ {
+ BIGNUM *p = NULL, *g = NULL;
+ BIGNUM *q = NULL, *priv_key = NULL;
+ BIGNUM *pub_key = BN_new();
+ BN_zero(pub_key);
+
+ p = BN_bin2bn((const unsigned char *)
+ xform->dsa.p.data,
+ xform->dsa.p.length,
+ p);
+
+ g = BN_bin2bn((const unsigned char *)
+ xform->dsa.g.data,
+ xform->dsa.g.length,
+ g);
+
+ q = BN_bin2bn((const unsigned char *)
+ xform->dsa.q.data,
+ xform->dsa.q.length,
+ q);
+ if (!p || !q || !g)
+ goto err_dsa;
+
+ priv_key = BN_bin2bn((const unsigned char *)
+ xform->dsa.x.data,
+ xform->dsa.x.length,
+ priv_key);
+ if (priv_key == NULL)
+ goto err_dsa;
+
+ DSA *dsa = DSA_new();
+ if (dsa == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ goto err_dsa;
+ }
+
+ set_dsa_params(dsa, p, q, g, ret);
+ if (ret) {
+ DSA_free(dsa);
+ OPENSSL_LOG(ERR, "Failed to dsa params\n");
+ goto err_dsa;
+ }
+
+ /*
+ * OpenSSL 1.1.0 mandates that the public key cannot be
+ * NULL in the very first call, so set a dummy public key.
+ * For consistency, follow the same approach for
+ * both versions.
+ */
+ /* just set a dummy public key for the very first call */
+ set_dsa_keys(dsa, pub_key, priv_key, ret);
+ if (ret) {
+ DSA_free(dsa);
+ OPENSSL_LOG(ERR, "Failed to set keys\n");
+ return -1;
+ }
+ asym_session->u.s.dsa = dsa;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DSA;
+ break;
+
+err_dsa:
+ if (p)
+ BN_free(p);
+ if (q)
+ BN_free(q);
+ if (g)
+ BN_free(g);
+ if (priv_key)
+ BN_free(priv_key);
+ if (pub_key)
+ BN_free(pub_key);
+ return -1;
+ }
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
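For the MODEX branch above, the application-facing xform might be built like this (a sketch; buffer ownership and lifetime are the caller's concern):

    #include <rte_crypto.h>

    static struct rte_crypto_asym_xform
    make_modex_xform(uint8_t *mod, size_t mod_len, uint8_t *exp, size_t exp_len)
    {
            struct rte_crypto_asym_xform xform = {
                    .next = NULL,
                    .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
                    .modex = {
                            .modulus = { .data = mod, .length = mod_len },
                            .exponent = { .data = exp, .length = exp_len },
                    },
            };

            return xform;
    }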
+/** Configure the session from a crypto xform chain */
+static int
+openssl_pmd_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *asym_sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ OPENSSL_LOG(ERR, "invalid asymmetric session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &asym_sess_private_data)) {
+ OPENSSL_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = openssl_set_asym_session_parameters(asym_sess_private_data,
+ xform);
+ if (ret != 0) {
+ OPENSSL_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, asym_sess_private_data);
+ return ret;
+ }
+
+ set_asym_session_private_data(sess, dev->driver_id,
+ asym_sess_private_data);
+
+ return 0;
+}
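The configure callback above is reached through the generic asymmetric session API; a hedged sketch of that application-side flow (session API names assumed from the same release, mempool setup omitted):

    #include <rte_cryptodev.h>

    static struct rte_cryptodev_asym_session *
    create_asym_session(uint8_t dev_id, struct rte_crypto_asym_xform *xform,
                    struct rte_mempool *sess_mp)
    {
            struct rte_cryptodev_asym_session *sess;

            sess = rte_cryptodev_asym_session_create(sess_mp);
            if (sess == NULL)
                    return NULL;
            if (rte_cryptodev_asym_session_init(dev_id, sess, xform, sess_mp)) {
                    rte_cryptodev_asym_session_free(sess);
                    return NULL;
            }
            return sess; /* ends up in openssl_pmd_asym_session_configure() */
    }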
/** Clear the memory of session so it doesn't leave key material behind */
static void
-openssl_pmd_session_clear(struct rte_cryptodev *dev,
+openssl_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
openssl_reset_session(sess_priv);
memset(sess_priv, 0, sizeof(struct openssl_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static void openssl_reset_asym_session(struct openssl_asym_session *sess)
+{
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ if (sess->u.r.rsa)
+ RSA_free(sess->u.r.rsa);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ if (sess->u.e.ctx) {
+ BN_CTX_end(sess->u.e.ctx);
+ BN_CTX_free(sess->u.e.ctx);
+ }
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ if (sess->u.m.ctx) {
+ BN_CTX_end(sess->u.m.ctx);
+ BN_CTX_free(sess->u.m.ctx);
+ }
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ if (sess->u.dh.dh_key)
+ DH_free(sess->u.dh.dh_key);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ if (sess->u.s.dsa)
+ DSA_free(sess->u.s.dsa);
+ break;
+ default:
+ break;
+ }
+}
+
+/** Clear the memory of asymmetric session
+ * so it doesn't leave key material behind
+ */
+static void
+openssl_pmd_asym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_asym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_asym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ openssl_reset_asym_session(sess_priv);
+ memset(sess_priv, 0, sizeof(struct openssl_asym_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_asym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -745,13 +1251,14 @@ struct rte_cryptodev_ops openssl_pmd_ops = {
.queue_pair_setup = openssl_pmd_qp_setup,
.queue_pair_release = openssl_pmd_qp_release,
- .queue_pair_start = openssl_pmd_qp_start,
- .queue_pair_stop = openssl_pmd_qp_stop,
.queue_pair_count = openssl_pmd_qp_count,
- .session_get_size = openssl_pmd_session_get_size,
- .session_configure = openssl_pmd_session_configure,
- .session_clear = openssl_pmd_session_clear
+ .sym_session_get_size = openssl_pmd_sym_session_get_size,
+ .asym_session_get_size = openssl_pmd_asym_session_get_size,
+ .sym_session_configure = openssl_pmd_sym_session_configure,
+ .asym_session_configure = openssl_pmd_asym_session_configure,
+ .sym_session_clear = openssl_pmd_sym_session_clear,
+ .asym_session_clear = openssl_pmd_asym_session_clear
};
struct rte_cryptodev_ops *rte_openssl_pmd_ops = &openssl_pmd_ops;
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h
index bc8dc7cd..a8f2c848 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_private.h
+++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -8,29 +8,19 @@
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/des.h>
+#include <openssl/rsa.h>
+#include <openssl/dh.h>
+#include <openssl/dsa.h>
#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
/**< Open SSL Crypto PMD device name */
-#define OPENSSL_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_OPENSSL_DEBUG
-#define OPENSSL_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
- __func__, __LINE__, ## args)
-
-#define OPENSSL_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define OPENSSL_LOG_INFO(fmt, args...)
-#define OPENSSL_LOG_DBG(fmt, args...)
-#endif
+/** OpenSSL PMD driver logtype */
+int openssl_logtype_driver;
+#define OPENSSL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, openssl_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
/* Maximum length for digest (SHA-512 needs 64 bytes) */
#define DIGEST_LENGTH_MAX 64
@@ -62,8 +52,6 @@ enum openssl_auth_mode {
struct openssl_private {
unsigned int max_nb_qpairs;
/**< Max number of queue pairs */
- unsigned int max_nb_sessions;
- /**< Max number of sessions */
};
/** OPENSSL crypto queue pair */
@@ -157,6 +145,31 @@ struct openssl_session {
} __rte_cache_aligned;
+/** OPENSSL crypto private asymmetric session structure */
+struct openssl_asym_session {
+ enum rte_crypto_asym_xform_type xfrm_type;
+ union {
+ struct rsa {
+ RSA *rsa;
+ } r;
+ struct exp {
+ BIGNUM *exp;
+ BIGNUM *mod;
+ BN_CTX *ctx;
+ } e;
+ struct mod {
+ BIGNUM *modulus;
+ BN_CTX *ctx;
+ } m;
+ struct dh {
+ DH *dh_key;
+ uint32_t key_op;
+ } dh;
+ struct {
+ DSA *dsa;
+ } s;
+ } u;
+} __rte_cache_aligned;
/** Set and validate OPENSSL crypto session parameters */
extern int
openssl_set_session_parameters(struct openssl_session *sess,
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
deleted file mode 100644
index 07266a5e..00000000
--- a/drivers/crypto/qat/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2015-2018 Intel Corporation
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_pmd_qat.a
-
-# library version
-LIBABIVER := 1
-
-# build flags
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -O3
-
-# external library include paths
-CFLAGS += -I$(SRCDIR)/qat_adf
-LDLIBS += -lcrypto
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_cryptodev
-LDLIBS += -lrte_pci -lrte_bus_pci
-
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_crypto.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_qp.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_adf/qat_algs_build_desc.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += rte_qat_cryptodev.c
-
-# export include files
-SYMLINK-y-include +=
-
-# versioning export map
-EXPORT_MAP := rte_pmd_qat_version.map
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/qat/README b/drivers/crypto/qat/README
new file mode 100644
index 00000000..444ae605
--- /dev/null
+++ b/drivers/crypto/qat/README
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+The Makefile for the crypto QAT PMD is in the common/qat directory.
+The QAT driver is built from there because only one library is built for the
+whole QAT PCI device; that library includes all the services (crypto, compression)
+that are enabled on the device.
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index 006cd655..9cc98d2c 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -1,14 +1,18 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017-2018 Intel Corporation
+# this does not build the QAT driver; that is done from the compression
+# driver, which comes later. Here we just add our source files to the list
+build = false
dep = dependency('libcrypto', required: false)
-if not dep.found()
- build = false
+qat_includes += include_directories('.')
+qat_deps += 'cryptodev'
+if dep.found()
+ # Add our source files to the list
+ qat_sources += files('qat_sym_pmd.c',
+ 'qat_sym.c',
+ 'qat_sym_session.c')
+ qat_ext_deps += dep
+ pkgconfig_extra_libs += '-lcrypto'
+ qat_cflags += '-DBUILD_QAT_SYM'
endif
-sources = files('qat_crypto.c', 'qat_qp.c',
- 'qat_adf/qat_algs_build_desc.c',
- 'rte_qat_cryptodev.c')
-includes += include_directories('qat_adf')
-deps += ['bus_pci']
-ext_deps += dep
-pkgconfig_extra_libs += '-lcrypto'
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
deleted file mode 100644
index 88bd5f00..00000000
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2018 Intel Corporation
- */
-#ifndef _ICP_QAT_ALGS_H_
-#define _ICP_QAT_ALGS_H_
-#include <rte_memory.h>
-#include <rte_crypto.h>
-#include "icp_qat_hw.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-#include "../qat_crypto.h"
-
-/*
- * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
- * Integrity Key (IK)
- */
-#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
-
-#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
-
-/* 3DES key sizes */
-#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
-#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
-
-#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_NO_CONVERT, \
- ICP_QAT_HW_CIPHER_ENCRYPT)
-
-#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_KEY_CONVERT, \
- ICP_QAT_HW_CIPHER_DECRYPT)
-
-struct qat_alg_buf {
- uint32_t len;
- uint32_t resrvd;
- uint64_t addr;
-} __rte_packed;
-
-enum qat_crypto_proto_flag {
- QAT_CRYPTO_PROTO_FLAG_NONE = 0,
- QAT_CRYPTO_PROTO_FLAG_CCM = 1,
- QAT_CRYPTO_PROTO_FLAG_GCM = 2,
- QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
- QAT_CRYPTO_PROTO_FLAG_ZUC = 4
-};
-
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SGL_MAX_NUMBER 16
-
-struct qat_alg_buf_list {
- uint64_t resrvd;
- uint32_t num_bufs;
- uint32_t num_mapped_bufs;
- struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER];
-} __rte_packed __rte_cache_aligned;
-
-struct qat_crypto_op_cookie {
- struct qat_alg_buf_list qat_sgl_list_src;
- struct qat_alg_buf_list qat_sgl_list_dst;
- rte_iova_t qat_sgl_src_phys_addr;
- rte_iova_t qat_sgl_dst_phys_addr;
-};
-
-/* Common content descriptor */
-struct qat_alg_cd {
- struct icp_qat_hw_cipher_algo_blk cipher;
- struct icp_qat_hw_auth_algo_blk hash;
-} __rte_packed __rte_cache_aligned;
-
-struct qat_session {
- enum icp_qat_fw_la_cmd_id qat_cmd;
- enum icp_qat_hw_cipher_algo qat_cipher_alg;
- enum icp_qat_hw_cipher_dir qat_dir;
- enum icp_qat_hw_cipher_mode qat_mode;
- enum icp_qat_hw_auth_algo qat_hash_alg;
- enum icp_qat_hw_auth_op auth_op;
- void *bpi_ctx;
- struct qat_alg_cd cd;
- uint8_t *cd_cur_ptr;
- rte_iova_t cd_paddr;
- struct icp_qat_fw_la_bulk_req fw_req;
- uint8_t aad_len;
- struct qat_crypto_instance *inst;
- struct {
- uint16_t offset;
- uint16_t length;
- } cipher_iv;
- struct {
- uint16_t offset;
- uint16_t length;
- } auth_iv;
- uint16_t digest_length;
- rte_spinlock_t lock; /* protects this struct */
- enum qat_device_gen min_qat_dev_gen;
-};
-
-int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
-
-int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
- uint8_t *enckey,
- uint32_t enckeylen);
-
-int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
- uint8_t *authkey,
- uint32_t authkeylen,
- uint32_t aad_length,
- uint32_t digestsize,
- unsigned int operation);
-
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- enum qat_crypto_proto_flag proto_flags);
-
-int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_aes_docsisbpi_key(int key_len,
- enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
-int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-#endif
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
deleted file mode 100644
index d9ce2a13..00000000
--- a/drivers/crypto/qat/qat_crypto.c
+++ /dev/null
@@ -1,1696 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <strings.h>
-#include <string.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <sys/queue.h>
-#include <stdarg.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_memory.h>
-#include <rte_tailq.h>
-#include <rte_malloc.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_string_fns.h>
-#include <rte_spinlock.h>
-#include <rte_hexdump.h>
-#include <rte_crypto_sym.h>
-#include <rte_byteorder.h>
-#include <rte_pci.h>
-#include <rte_bus_pci.h>
-
-#include <openssl/evp.h>
-
-#include "qat_logs.h"
-#include "qat_algs.h"
-#include "qat_crypto.h"
-#include "adf_transport_access_macros.h"
-
-#define BYTE_LENGTH 8
-/* bpi is only used for partial blocks of DES and AES
- * so AES block len can be assumed as max len for iv, src and dst
- */
-#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
-
-static int
-qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
- struct qat_pmd_private *internals) {
- int i = 0;
- const struct rte_cryptodev_capabilities *capability;
-
- while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
- RTE_CRYPTO_OP_TYPE_UNDEFINED) {
- if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
- continue;
-
- if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
- continue;
-
- if (capability->sym.cipher.algo == algo)
- return 1;
- }
- return 0;
-}
-
-static int
-qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
- struct qat_pmd_private *internals) {
- int i = 0;
- const struct rte_cryptodev_capabilities *capability;
-
- while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
- RTE_CRYPTO_OP_TYPE_UNDEFINED) {
- if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
- continue;
-
- if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
- continue;
-
- if (capability->sym.auth.algo == algo)
- return 1;
- }
- return 0;
-}
-
-/** Encrypt a single partial block
- * Depends on openssl libcrypto
- * Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, int ivlen, int srclen,
- void *bpi_ctx)
-{
- EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
- int encrypted_ivlen;
- uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
- uint8_t *encr = encrypted_iv;
-
- /* ECB method: encrypt the IV, then XOR this with plaintext */
- if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
- <= 0)
- goto cipher_encrypt_err;
-
- for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
- *dst = *src ^ *encr;
-
- return 0;
-
-cipher_encrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
- return -EINVAL;
-}
-
-/** Decrypt a single partial block
- * Depends on openssl libcrypto
- * Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, int ivlen, int srclen,
- void *bpi_ctx)
-{
- EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
- int encrypted_ivlen;
- uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
- uint8_t *encr = encrypted_iv;
-
- /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
- if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
- <= 0)
- goto cipher_decrypt_err;
-
- for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
- *dst = *src ^ *encr;
-
- return 0;
-
-cipher_decrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
- return -EINVAL;
-}
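/* Editor's sketch (not driver code): for a single partial block, CFB
 * reduces to ECB-encrypting the IV and XORing the keystream with the
 * data, which is exactly what the two helpers above exploit. Assumes
 * OpenSSL libcrypto; the demo function name is hypothetical.
 */
#include <stdint.h>
#include <openssl/evp.h>

static int
cfb_via_ecb_demo(const uint8_t key[16], const uint8_t iv[16],
		const uint8_t *src, uint8_t *dst, int len /* <= 16 */)
{
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
	uint8_t keystream[16];
	int outl, i, ret = -1;

	if (ctx == NULL)
		return -1;
	/* Encrypt (never decrypt) the IV; both CFB directions use it */
	if (EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL) == 1 &&
	    EVP_EncryptUpdate(ctx, keystream, &outl, iv, 16) == 1) {
		for (i = 0; i < len; i++)
			dst[i] = src[i] ^ keystream[i];
		ret = 0;
	}
	EVP_CIPHER_CTX_free(ctx);
	return ret;
}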
-
-/** Creates a context in either AES or DES in ECB mode
- * Depends on openssl libcrypto
- */
-static int
-bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
- enum rte_crypto_cipher_operation direction __rte_unused,
- uint8_t *key, void **ctx)
-{
- const EVP_CIPHER *algo = NULL;
- int ret;
- *ctx = EVP_CIPHER_CTX_new();
-
- if (*ctx == NULL) {
- ret = -ENOMEM;
- goto ctx_init_err;
- }
-
- if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
- algo = EVP_des_ecb();
- else
- algo = EVP_aes_128_ecb();
-
- /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
- if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
- ret = -EINVAL;
- goto ctx_init_err;
- }
-
- return 0;
-
-ctx_init_err:
- if (*ctx != NULL)
- EVP_CIPHER_CTX_free(*ctx);
- return ret;
-}
-
-/** Frees a context previously created
- * Depends on openssl libcrypto
- */
-static void
-bpi_cipher_ctx_free(void *bpi_ctx)
-{
- if (bpi_ctx != NULL)
- EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
-}
-
-static inline uint32_t
-adf_modulo(uint32_t data, uint32_t shift);
-
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
-
-void
-qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
- struct rte_cryptodev_sym_session *sess)
-{
- PMD_INIT_FUNC_TRACE();
- uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
- struct qat_session *s = (struct qat_session *)sess_priv;
-
- if (sess_priv) {
- if (s->bpi_ctx)
- bpi_cipher_ctx_free(s->bpi_ctx);
- memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
- struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
- rte_mempool_put(sess_mp, sess_priv);
- }
-}
-
-static int
-qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
-{
- /* Cipher Only */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
- return ICP_QAT_FW_LA_CMD_CIPHER;
-
- /* Authentication Only */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
- return ICP_QAT_FW_LA_CMD_AUTH;
-
- /* AEAD */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		/* AES-GCM and AES-CCM work with different directions:
-		 * GCM first encrypts and then generates the hash, whereas
-		 * AES-CCM first generates the hash and then encrypts.
-		 * A similar relation applies to decryption.
-		 */
- if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
- if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
- else
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
- else
- if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
- else
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
- }
-
- if (xform->next == NULL)
- return -1;
-
- /* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
-
- /* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
-
- return -1;
-}
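/* Editor's illustration (hypothetical checks, not driver code) of the
 * AEAD mapping above: GCM encrypts then hashes, CCM hashes then
 * encrypts, and the order flips for decryption.
 */
#include <assert.h>

static void
qat_cmd_id_aead_example(void)
{
	struct rte_crypto_sym_xform x = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.next = NULL,
	};

	x.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	x.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	assert(qat_get_cmd_id(&x) == ICP_QAT_FW_LA_CMD_CIPHER_HASH);

	x.aead.algo = RTE_CRYPTO_AEAD_AES_CCM;
	assert(qat_get_cmd_id(&x) == ICP_QAT_FW_LA_CMD_HASH_CIPHER);

	x.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	assert(qat_get_cmd_id(&x) == ICP_QAT_FW_LA_CMD_CIPHER_HASH);
}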
-
-static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
-{
- do {
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
- return &xform->auth;
-
- xform = xform->next;
- } while (xform);
-
- return NULL;
-}
-
-static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
-{
- do {
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
- return &xform->cipher;
-
- xform = xform->next;
- } while (xform);
-
- return NULL;
-}
-
-int
-qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
-{
- struct qat_pmd_private *internals = dev->data->dev_private;
- struct rte_crypto_cipher_xform *cipher_xform = NULL;
- int ret;
-
- /* Get cipher xform from crypto xform chain */
- cipher_xform = qat_get_cipher_xform(xform);
-
- session->cipher_iv.offset = cipher_xform->iv.offset;
- session->cipher_iv.length = cipher_xform->iv.length;
-
- switch (cipher_xform->algo) {
- case RTE_CRYPTO_CIPHER_AES_CBC:
- if (qat_alg_validate_aes_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- if (qat_alg_validate_aes_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- break;
- case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
- if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
- break;
- case RTE_CRYPTO_CIPHER_NULL:
- session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
- break;
- case RTE_CRYPTO_CIPHER_KASUMI_F8:
- if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
- break;
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- if (qat_alg_validate_3des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_DES_CBC:
- if (qat_alg_validate_des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_3DES_CTR:
- if (qat_alg_validate_3des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- break;
- case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
- ret = bpi_cipher_ctx_init(
- cipher_xform->algo,
- cipher_xform->op,
- cipher_xform->key.data,
- &session->bpi_ctx);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
- goto error_out;
- }
- if (qat_alg_validate_des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
- ret = bpi_cipher_ctx_init(
- cipher_xform->algo,
- cipher_xform->op,
- cipher_xform->key.data,
- &session->bpi_ctx);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
- goto error_out;
- }
- if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_ZUC_EEA3:
- if (!qat_is_cipher_alg_supported(
- cipher_xform->algo, internals)) {
- PMD_DRV_LOG(ERR, "%s not supported on this device",
- rte_crypto_cipher_algorithm_strings
- [cipher_xform->algo]);
- ret = -ENOTSUP;
- goto error_out;
- }
- if (qat_alg_validate_zuc_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
- break;
- case RTE_CRYPTO_CIPHER_3DES_ECB:
- case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_AES_F8:
- case RTE_CRYPTO_CIPHER_AES_XTS:
- case RTE_CRYPTO_CIPHER_ARC4:
- PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
- cipher_xform->algo);
- ret = -ENOTSUP;
- goto error_out;
- default:
- PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
- cipher_xform->algo);
- ret = -EINVAL;
- goto error_out;
- }
-
- if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
- session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
- else
- session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- cipher_xform->key.data,
- cipher_xform->key.length)) {
- ret = -EINVAL;
- goto error_out;
- }
-
- return 0;
-
-error_out:
- if (session->bpi_ctx) {
- bpi_cipher_ctx_free(session->bpi_ctx);
- session->bpi_ctx = NULL;
- }
- return ret;
-}
-
-int
-qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct rte_cryptodev_sym_session *sess,
- struct rte_mempool *mempool)
-{
- void *sess_private_data;
- int ret;
-
- if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
- return -ENOMEM;
- }
-
- ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
- "session parameters");
-
- /* Return session to mempool */
- rte_mempool_put(mempool, sess_private_data);
- return ret;
- }
-
- set_session_private_data(sess, dev->driver_id,
- sess_private_data);
-
- return 0;
-}
-
-int
-qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private)
-{
- struct qat_session *session = session_private;
- int ret;
-
- int qat_cmd_id;
- PMD_INIT_FUNC_TRACE();
-
- /* Set context descriptor physical address */
- session->cd_paddr = rte_mempool_virt2iova(session) +
- offsetof(struct qat_session, cd);
-
- session->min_qat_dev_gen = QAT_GEN1;
-
- /* Get requested QAT command id */
- qat_cmd_id = qat_get_cmd_id(xform);
- if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
- PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
- return -ENOTSUP;
- }
- session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
- switch (session->qat_cmd) {
- case ICP_QAT_FW_LA_CMD_CIPHER:
- ret = qat_crypto_sym_configure_session_cipher(dev, xform, session);
- if (ret < 0)
- return ret;
- break;
- case ICP_QAT_FW_LA_CMD_AUTH:
- ret = qat_crypto_sym_configure_session_auth(dev, xform, session);
- if (ret < 0)
- return ret;
- break;
- case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- ret = qat_crypto_sym_configure_session_aead(xform,
- session);
- if (ret < 0)
- return ret;
- } else {
- ret = qat_crypto_sym_configure_session_cipher(dev,
- xform, session);
- if (ret < 0)
- return ret;
- ret = qat_crypto_sym_configure_session_auth(dev,
- xform, session);
- if (ret < 0)
- return ret;
- }
- break;
- case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- ret = qat_crypto_sym_configure_session_aead(xform,
- session);
- if (ret < 0)
- return ret;
- } else {
- ret = qat_crypto_sym_configure_session_auth(dev,
- xform, session);
- if (ret < 0)
- return ret;
- ret = qat_crypto_sym_configure_session_cipher(dev,
- xform, session);
- if (ret < 0)
- return ret;
- }
- break;
- case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
- case ICP_QAT_FW_LA_CMD_TRNG_TEST:
- case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
- case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
- case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
- case ICP_QAT_FW_LA_CMD_MGF1:
- case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
- case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
- case ICP_QAT_FW_LA_CMD_DELIMITER:
- PMD_DRV_LOG(ERR, "Unsupported Service %u",
- session->qat_cmd);
- return -ENOTSUP;
- default:
- PMD_DRV_LOG(ERR, "Unsupported Service %u",
- session->qat_cmd);
- return -ENOTSUP;
- }
-
- return 0;
-}
-
-int
-qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
-{
- struct rte_crypto_auth_xform *auth_xform = NULL;
- struct qat_pmd_private *internals = dev->data->dev_private;
- auth_xform = qat_get_auth_xform(xform);
- uint8_t *key_data = auth_xform->key.data;
- uint8_t key_length = auth_xform->key.length;
-
- switch (auth_xform->algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
- break;
- case RTE_CRYPTO_AUTH_SHA224_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
- break;
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
- break;
- case RTE_CRYPTO_AUTH_SHA384_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
- break;
- case RTE_CRYPTO_AUTH_SHA512_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
- break;
- case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
- break;
- case RTE_CRYPTO_AUTH_AES_GMAC:
- if (qat_alg_validate_aes_key(auth_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES key size");
- return -EINVAL;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
-
- break;
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
- break;
- case RTE_CRYPTO_AUTH_MD5_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
- break;
- case RTE_CRYPTO_AUTH_NULL:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
- break;
- case RTE_CRYPTO_AUTH_KASUMI_F9:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
- break;
- case RTE_CRYPTO_AUTH_ZUC_EIA3:
- if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
- PMD_DRV_LOG(ERR, "%s not supported on this device",
- rte_crypto_auth_algorithm_strings
- [auth_xform->algo]);
- return -ENOTSUP;
- }
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
- break;
- case RTE_CRYPTO_AUTH_SHA1:
- case RTE_CRYPTO_AUTH_SHA256:
- case RTE_CRYPTO_AUTH_SHA512:
- case RTE_CRYPTO_AUTH_SHA224:
- case RTE_CRYPTO_AUTH_SHA384:
- case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CMAC:
- case RTE_CRYPTO_AUTH_AES_CBC_MAC:
- PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
- auth_xform->algo);
- return -ENOTSUP;
- default:
- PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
- auth_xform->algo);
- return -EINVAL;
- }
-
- session->auth_iv.offset = auth_xform->iv.offset;
- session->auth_iv.length = auth_xform->iv.length;
-
- if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
- if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
- session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
- session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
-			/*
-			 * The cipher descriptor content must be created
-			 * first, then the authentication content
-			 */
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- auth_xform->key.data,
- auth_xform->key.length))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_auth(session,
- key_data,
- key_length,
- 0,
- auth_xform->digest_length,
- auth_xform->op))
- return -EINVAL;
- } else {
- session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
- session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
-			/*
-			 * The authentication descriptor content must be
-			 * created first, then the cipher content
-			 */
- if (qat_alg_aead_session_create_content_desc_auth(session,
- key_data,
- key_length,
- 0,
- auth_xform->digest_length,
- auth_xform->op))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- auth_xform->key.data,
- auth_xform->key.length))
- return -EINVAL;
- }
-		/* Restore to authentication only */
- session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
- } else {
- if (qat_alg_aead_session_create_content_desc_auth(session,
- key_data,
- key_length,
- 0,
- auth_xform->digest_length,
- auth_xform->op))
- return -EINVAL;
- }
-
- session->digest_length = auth_xform->digest_length;
- return 0;
-}
-
-int
-qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
-{
- struct rte_crypto_aead_xform *aead_xform = &xform->aead;
- enum rte_crypto_auth_operation crypto_operation;
-
- /*
- * Store AEAD IV parameters as cipher IV,
- * to avoid unnecessary memory usage
- */
- session->cipher_iv.offset = xform->aead.iv.offset;
- session->cipher_iv.length = xform->aead.iv.length;
-
- switch (aead_xform->algo) {
- case RTE_CRYPTO_AEAD_AES_GCM:
- if (qat_alg_validate_aes_key(aead_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES key size");
- return -EINVAL;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
- break;
- case RTE_CRYPTO_AEAD_AES_CCM:
- if (qat_alg_validate_aes_key(aead_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES key size");
- return -EINVAL;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
- break;
- default:
- PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
- aead_xform->algo);
- return -EINVAL;
- }
-
- if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
- aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
- (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
- aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
- session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
-		/*
-		 * The cipher descriptor content must be created first,
-		 * then the authentication content
-		 */
-
- crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
- RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- aead_xform->key.data,
- aead_xform->key.length))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_auth(session,
- aead_xform->key.data,
- aead_xform->key.length,
- aead_xform->aad_length,
- aead_xform->digest_length,
- crypto_operation))
- return -EINVAL;
- } else {
- session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
-		/*
-		 * The authentication descriptor content must be created
-		 * first, then the cipher content
-		 */
-
- crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
- RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
-
- if (qat_alg_aead_session_create_content_desc_auth(session,
- aead_xform->key.data,
- aead_xform->key.length,
- aead_xform->aad_length,
- aead_xform->digest_length,
- crypto_operation))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- aead_xform->key.data,
- aead_xform->key.length))
- return -EINVAL;
- }
-
- session->digest_length = aead_xform->digest_length;
- return 0;
-}
-
-unsigned qat_crypto_sym_get_session_private_size(
- struct rte_cryptodev *dev __rte_unused)
-{
- return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
-}
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_session *ctx,
- struct rte_crypto_op *op)
-{
- int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = block_len > 0 ?
- sym_op->cipher.data.length % block_len : 0;
-
- if (last_block_len &&
- ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
- /* Decrypt last block */
- uint8_t *last_block, *dst, *iv;
- uint32_t last_block_offset = sym_op->cipher.data.offset +
- sym_op->cipher.data.length - last_block_len;
- last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
- uint8_t *, last_block_offset);
-
- if (unlikely(sym_op->m_dst != NULL))
- /* out-of-place operation (OOP) */
- dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
- uint8_t *, last_block_offset);
- else
- dst = last_block;
-
- if (last_block_len < sym_op->cipher.data.length)
- /* use previous block ciphertext as IV */
- iv = last_block - block_len;
- else
- /* runt block, i.e. less than one full block */
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
- last_block_len);
-#endif
- bpi_cipher_decrypt(last_block, dst, iv, block_len,
- last_block_len, ctx->bpi_ctx);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
- last_block_len);
-#endif
- }
-
- return sym_op->cipher.data.length - last_block_len;
-}
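/* Worked example for the pre-process above (illustrative numbers): on
 * the decrypt path with AES DOCSIS BPI (block_len = 16) and
 * cipher.data.length = 100, last_block_len = 100 % 16 = 4, so only
 * bytes 96..99 are CFB-decrypted in software (using ciphertext bytes
 * 80..95 as the IV) and the function returns 96 -- the six complete
 * blocks left for the device.
 */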
-
-static inline uint32_t
-qat_bpicipher_postprocess(struct qat_session *ctx,
- struct rte_crypto_op *op)
-{
- int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = block_len > 0 ?
- sym_op->cipher.data.length % block_len : 0;
-
- if (last_block_len > 0 &&
- ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
-
- /* Encrypt last block */
- uint8_t *last_block, *dst, *iv;
- uint32_t last_block_offset;
-
- last_block_offset = sym_op->cipher.data.offset +
- sym_op->cipher.data.length - last_block_len;
- last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
- uint8_t *, last_block_offset);
-
- if (unlikely(sym_op->m_dst != NULL))
- /* out-of-place operation (OOP) */
- dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
- uint8_t *, last_block_offset);
- else
- dst = last_block;
-
- if (last_block_len < sym_op->cipher.data.length)
- /* use previous block ciphertext as IV */
- iv = dst - block_len;
- else
- /* runt block, i.e. less than one full block */
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "BPI: src before post-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst before post-process:",
- dst, last_block_len);
-#endif
- bpi_cipher_encrypt(last_block, dst, iv, block_len,
- last_block_len, ctx->bpi_ctx);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "BPI: src after post-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst after post-process:", dst,
- last_block_len);
-#endif
- }
- return sym_op->cipher.data.length - last_block_len;
-}
-
-static inline void
-txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
- WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
- q->hw_queue_number, q->tail);
- q->nb_pending_requests = 0;
- q->csr_tail = q->tail;
-}
-
-uint16_t
-qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- register struct qat_queue *queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- register uint32_t nb_ops_sent = 0;
- register struct rte_crypto_op **cur_op = ops;
- register int ret;
- uint16_t nb_ops_possible = nb_ops;
- register uint8_t *base_addr;
- register uint32_t tail;
- int overflow;
-
- if (unlikely(nb_ops == 0))
- return 0;
-
- /* read params used a lot in main loop into registers */
- queue = &(tmp_qp->tx_q);
- base_addr = (uint8_t *)queue->base_addr;
- tail = queue->tail;
-
- /* Find how many can actually fit on the ring */
- tmp_qp->inflights16 += nb_ops;
- overflow = tmp_qp->inflights16 - queue->max_inflights;
- if (overflow > 0) {
- tmp_qp->inflights16 -= overflow;
- nb_ops_possible = nb_ops - overflow;
- if (nb_ops_possible == 0)
- return 0;
- }
-
- while (nb_ops_sent != nb_ops_possible) {
- ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
- tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
- if (ret != 0) {
- tmp_qp->stats.enqueue_err_count++;
-			/*
-			 * This message cannot be enqueued, so roll back
-			 * the inflight count by the ops not yet sent
-			 */
- tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- }
-
- tail = adf_modulo(tail + queue->msg_size, queue->modulo);
- nb_ops_sent++;
- cur_op++;
- }
-kick_tail:
- queue->tail = tail;
- tmp_qp->stats.enqueued_count += nb_ops_sent;
- queue->nb_pending_requests += nb_ops_sent;
- if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
- queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
- txq_write_tail(tmp_qp, queue);
- }
- return nb_ops_sent;
-}
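/* Editor's note on the doorbell coalescing above (illustrative
 * numbers, assuming the thresholds from qat_crypto.h): with
 * QAT_CSR_TAIL_FORCE_WRITE_THRESH = 256 and QAT_CSR_TAIL_WRITE_THRESH
 * = 32, a burst that leaves inflights16 = 300 and only 10 pending
 * requests skips the tail CSR write (both conditions false), while a
 * lightly loaded queue (inflights16 < 256) rings the doorbell on
 * every burst so latency does not suffer when there is nothing to
 * batch with.
 */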
-
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
-{
- uint32_t old_head, new_head;
- uint32_t max_head;
-
- old_head = q->csr_head;
- new_head = q->head;
- max_head = qp->nb_descriptors * q->msg_size;
-
- /* write out free descriptors */
- void *cur_desc = (uint8_t *)q->base_addr + old_head;
-
- if (new_head < old_head) {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
- memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
- } else {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
- }
- q->nb_processed_responses = 0;
- q->csr_head = new_head;
-
- /* write current head to CSR */
- WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
- q->hw_queue_number, new_head);
-}
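/* Wrap-around sketch for rxq_free_desc (illustrative numbers): on a
 * ring where max_head = 4096 bytes, an old csr_head of 3968 and a new
 * head of 128 mean the freed region wraps, so two memsets run --
 * bytes 3968..4095 and bytes 0..127 are refilled with
 * ADF_RING_EMPTY_SIG_BYTE before the head CSR is advanced.
 */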
-
-uint16_t
-qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct qat_queue *rx_queue, *tx_queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- uint32_t msg_counter = 0;
- struct rte_crypto_op *rx_op;
- struct icp_qat_fw_comn_resp *resp_msg;
- uint32_t head;
-
- rx_queue = &(tmp_qp->rx_q);
- tx_queue = &(tmp_qp->tx_q);
- head = rx_queue->head;
- resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)rx_queue->base_addr + head);
-
- while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_ops) {
- rx_op = (struct rte_crypto_op *)(uintptr_t)
- (resp_msg->opaque_data);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
- sizeof(struct icp_qat_fw_comn_resp));
-#endif
- if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
- ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
- resp_msg->comn_hdr.comn_status)) {
- rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- struct qat_session *sess = (struct qat_session *)
- get_session_private_data(
- rx_op->sym->session,
- cryptodev_qat_driver_id);
-
- if (sess->bpi_ctx)
- qat_bpicipher_postprocess(sess, rx_op);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- }
-
- head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
- resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)rx_queue->base_addr + head);
- *ops = rx_op;
- ops++;
- msg_counter++;
- }
- if (msg_counter > 0) {
- rx_queue->head = head;
- tmp_qp->stats.dequeued_count += msg_counter;
- rx_queue->nb_processed_responses += msg_counter;
- tmp_qp->inflights16 -= msg_counter;
-
- if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
- rxq_free_desc(tmp_qp, rx_queue);
- }
- /* also check if tail needs to be advanced */
- if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
- tx_queue->tail != tx_queue->csr_tail) {
- txq_write_tail(tmp_qp, tx_queue);
- }
- return msg_counter;
-}
-
-static inline int
-qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
- struct qat_alg_buf_list *list, uint32_t data_len)
-{
- int nr = 1;
-
- uint32_t buf_len = rte_pktmbuf_iova(buf) -
- buff_start + rte_pktmbuf_data_len(buf);
-
- list->bufers[0].addr = buff_start;
- list->bufers[0].resrvd = 0;
- list->bufers[0].len = buf_len;
-
- if (data_len <= buf_len) {
- list->num_bufs = nr;
- list->bufers[0].len = data_len;
- return 0;
- }
-
- buf = buf->next;
- while (buf) {
- if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
-			PMD_DRV_LOG(ERR, "QAT PMD exceeded max number of"
-					" QAT SGL entries (%u)",
-					QAT_SGL_MAX_NUMBER);
- return -EINVAL;
- }
-
- list->bufers[nr].len = rte_pktmbuf_data_len(buf);
- list->bufers[nr].resrvd = 0;
- list->bufers[nr].addr = rte_pktmbuf_iova(buf);
-
- buf_len += list->bufers[nr].len;
- buf = buf->next;
-
- if (buf_len > data_len) {
- list->bufers[nr].len -=
- buf_len - data_len;
- buf = NULL;
- }
- ++nr;
- }
- list->num_bufs = nr;
-
- return 0;
-}
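/* Worked example for qat_sgl_fill_array (hypothetical mbuf chain):
 * three chained 64-byte segments with data_len = 150 and buff_start
 * at the first segment's IOVA produce
 *   bufers[0].len = 64, bufers[1].len = 64, bufers[2].len = 22,
 * num_bufs = 3; the last entry is trimmed by (buf_len - data_len) so
 * the device never reads past the requested 150 bytes.
 */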
-
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
- struct icp_qat_fw_la_cipher_req_params *cipher_param,
- struct rte_crypto_op *op,
- struct icp_qat_fw_la_bulk_req *qat_req)
-{
- /* copy IV into request if it fits */
- if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset),
- iv_length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- rte_crypto_op_ctophys_offset(op,
- iv_offset);
- }
-}
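/* Example of the two IV paths above (editor's note): assuming the
 * firmware's cipher_IV_array is 16 bytes, a 12- or 16-byte IV is
 * copied inline into the request, while a longer IV falls back to the
 * 64-bit-pointer form and only its physical address travels in the
 * descriptor.
 */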
-
-/** Setting the IV for CCM is a special case: the 0th byte is set to
- * q - 1, where q is the padding of the nonce in the 16-byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
- struct icp_qat_fw_la_cipher_req_params *cipher_param,
- struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
- rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
- iv_length);
- *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
- q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
- if (aad_len_field_sz)
- rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
- iv_length);
-}
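/* Worked example (assuming ICP_QAT_HW_CCM_NQ_CONST = 15 and
 * ICP_QAT_HW_CCM_NONCE_OFFSET = 1, as in the QAT headers): a 12-byte
 * nonce gives q = 15 - 12 = 3, so byte 0 of the IV block is set to
 * q - 1 = 2 -- the standard CCM encoding of the message-length field
 * size.
 */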
-
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
-{
- int ret = 0;
- struct qat_session *ctx;
- struct icp_qat_fw_la_cipher_req_params *cipher_param;
- struct icp_qat_fw_la_auth_req_params *auth_param;
- register struct icp_qat_fw_la_bulk_req *qat_req;
- uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
- uint32_t cipher_len = 0, cipher_ofs = 0;
- uint32_t auth_len = 0, auth_ofs = 0;
- uint32_t min_ofs = 0;
- uint64_t src_buf_start = 0, dst_buf_start = 0;
- uint8_t do_sgl = 0;
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
- PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
- "operation requests, op (%p) is not a "
- "symmetric operation.", op);
- return -EINVAL;
- }
-#endif
- if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests, op (%p) is sessionless.", op);
- return -EINVAL;
- }
-
- ctx = (struct qat_session *)get_session_private_data(
- op->sym->session, cryptodev_qat_driver_id);
-
- if (unlikely(ctx == NULL)) {
- PMD_DRV_LOG(ERR, "Session was not created for this device");
- return -EINVAL;
- }
-
- if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
- PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- return -EINVAL;
- }
-
-
-
- qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
- rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
- cipher_param = (void *)&qat_req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
-
- if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
- ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- /* AES-GCM or AES-CCM */
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
- (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
- && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
- && ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
- do_aead = 1;
- } else {
- do_auth = 1;
- do_cipher = 1;
- }
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
- do_auth = 1;
- do_cipher = 0;
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
- do_auth = 0;
- do_cipher = 1;
- }
-
- if (do_cipher) {
-
- if (ctx->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
- ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
- ctx->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-
- if (unlikely(
- (cipher_param->cipher_length % BYTE_LENGTH != 0)
- || (cipher_param->cipher_offset
- % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR,
- "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- cipher_len = op->sym->cipher.data.length >> 3;
- cipher_ofs = op->sym->cipher.data.offset >> 3;
-
- } else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to the device.
-			 * Process any partial block using CFB mode.
-			 * Even with 0 complete blocks, still send to the device
-			 * so it reaches the rx queue for post-processing and dequeuing
-			 */
- cipher_len = qat_bpicipher_preprocess(ctx, op);
- cipher_ofs = op->sym->cipher.data.offset;
- } else {
- cipher_len = op->sym->cipher.data.length;
- cipher_ofs = op->sym->cipher.data.offset;
- }
-
- set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
- min_ofs = cipher_ofs;
- }
-
- if (do_auth) {
-
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
- if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
- || (auth_param->auth_len % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR,
- "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- auth_ofs = op->sym->auth.data.offset >> 3;
- auth_len = op->sym->auth.data.length >> 3;
-
- auth_param->u1.aad_adr =
- rte_crypto_op_ctophys_offset(op,
- ctx->auth_iv.offset);
-
- } else if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- /* AES-GMAC */
- set_cipher_iv(ctx->auth_iv.length,
- ctx->auth_iv.offset,
- cipher_param, op, qat_req);
- auth_ofs = op->sym->auth.data.offset;
- auth_len = op->sym->auth.data.length;
-
- auth_param->u1.aad_adr = 0;
- auth_param->u2.aad_sz = 0;
-
- /*
- * If len(iv)==12B fw computes J0
- */
- if (ctx->auth_iv.length == 12) {
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-
- }
- } else {
- auth_ofs = op->sym->auth.data.offset;
- auth_len = op->sym->auth.data.length;
-
- }
- min_ofs = auth_ofs;
-
- if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
- auth_param->auth_res_addr =
- op->sym->auth.digest.phys_addr;
-
- }
-
- if (do_aead) {
-		/*
-		 * This address may be repointed at the IV offset inside
-		 * the op when setting the AAD physical pointer
-		 */
- rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
- if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- /*
- * If len(iv)==12B fw computes J0
- */
- if (ctx->cipher_iv.length == 12) {
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
- }
-
- set_cipher_iv(ctx->cipher_iv.length,
- ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
-
- } else if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user-selected
-			 * memory or to the IV offset in the crypto_op
-			 */
- uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is the true AAD length; it does not include the
-			 * 18 bytes of preceding data
-			 */
- uint8_t aad_ccm_real_len = 0;
-
- uint8_t aad_len_field_sz = 0;
- uint32_t msg_len_be =
- rte_bswap32(op->sym->aead.data.length);
-
- if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
- aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
- aad_ccm_real_len = ctx->aad_len -
- ICP_QAT_HW_CCM_AAD_B0_LEN -
- ICP_QAT_HW_CCM_AAD_LEN_INFO;
- } else {
- /*
-				 * aad_len is not greater than 18, so there is no
-				 * actual AAD data; use the IV after the op for B0
- */
- aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
- aad_phys_addr_aead =
- rte_crypto_op_ctophys_offset(op,
- ctx->cipher_iv.offset);
- }
-
- uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-
- aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
- ctx->digest_length, q);
-
- if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET
- + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
- (uint8_t *)&msg_len_be,
- ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
- } else {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- (uint8_t *)&msg_len_be
- + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
- - q), q);
- }
-
- if (aad_len_field_sz > 0) {
- *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
- = rte_bswap16(aad_ccm_real_len);
-
- if ((aad_ccm_real_len + aad_len_field_sz)
- % ICP_QAT_HW_CCM_AAD_B0_LEN) {
- uint8_t pad_len = 0;
- uint8_t pad_idx = 0;
-
- pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
- ((aad_ccm_real_len + aad_len_field_sz) %
- ICP_QAT_HW_CCM_AAD_B0_LEN);
- pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
- aad_ccm_real_len + aad_len_field_sz;
- memset(&aad_data[pad_idx],
- 0, pad_len);
- }
-
- }
-
- set_cipher_iv_ccm(ctx->cipher_iv.length,
- ctx->cipher_iv.offset,
- cipher_param, op, q,
- aad_len_field_sz);
-
- }
-
- cipher_len = op->sym->aead.data.length;
- cipher_ofs = op->sym->aead.data.offset;
- auth_len = op->sym->aead.data.length;
- auth_ofs = op->sym->aead.data.offset;
-
- auth_param->u1.aad_adr = aad_phys_addr_aead;
- auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
- min_ofs = op->sym->aead.data.offset;
- }
-
- if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
- do_sgl = 1;
-
- /* adjust for chain case */
- if (do_cipher && do_auth)
- min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
- if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
- min_ofs = 0;
-
- if (unlikely(op->sym->m_dst != NULL)) {
- /* Out-of-place operation (OOP)
- * Don't align DMA start. DMA the minimum data-set
- * so as not to overwrite data in dest buffer
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
- dst_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-
- } else {
- /* In-place operation
- * Start DMA at nearest aligned address below min_ofs
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
- & QAT_64_BTYE_ALIGN_MASK;
-
- if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
- rte_pktmbuf_headroom(op->sym->m_src))
- > src_buf_start)) {
- /* alignment has pushed addr ahead of start of mbuf
- * so revert and take the performance hit
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src,
- min_ofs);
- }
- dst_buf_start = src_buf_start;
- }
-
- if (do_cipher || do_aead) {
- cipher_param->cipher_offset =
- (uint32_t)rte_pktmbuf_iova_offset(
- op->sym->m_src, cipher_ofs) - src_buf_start;
- cipher_param->cipher_length = cipher_len;
- } else {
- cipher_param->cipher_offset = 0;
- cipher_param->cipher_length = 0;
- }
-
- if (do_auth || do_aead) {
- auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
- op->sym->m_src, auth_ofs) - src_buf_start;
- auth_param->auth_len = auth_len;
- } else {
- auth_param->auth_off = 0;
- auth_param->auth_len = 0;
- }
-
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- (cipher_param->cipher_offset + cipher_param->cipher_length)
- > (auth_param->auth_off + auth_param->auth_len) ?
- (cipher_param->cipher_offset + cipher_param->cipher_length)
- : (auth_param->auth_off + auth_param->auth_len);
-
- if (do_sgl) {
-
- ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
- QAT_COMN_PTR_TYPE_SGL);
- ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
- &qat_op_cookie->qat_sgl_list_src,
- qat_req->comn_mid.src_length);
- if (ret) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
- return ret;
- }
-
- if (likely(op->sym->m_dst == NULL))
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- qat_op_cookie->qat_sgl_src_phys_addr;
- else {
- ret = qat_sgl_fill_array(op->sym->m_dst,
- dst_buf_start,
- &qat_op_cookie->qat_sgl_list_dst,
- qat_req->comn_mid.dst_length);
-
- if (ret) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot "
- "fill sgl array");
- return ret;
- }
-
- qat_req->comn_mid.src_data_addr =
- qat_op_cookie->qat_sgl_src_phys_addr;
- qat_req->comn_mid.dest_data_addr =
- qat_op_cookie->qat_sgl_dst_phys_addr;
- }
- } else {
- qat_req->comn_mid.src_data_addr = src_buf_start;
- qat_req->comn_mid.dest_data_addr = dst_buf_start;
- }
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "qat_req:", qat_req,
- sizeof(struct icp_qat_fw_la_bulk_req));
- rte_hexdump(stdout, "src_data:",
- rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
- rte_pktmbuf_data_len(op->sym->m_src));
- if (do_cipher) {
- uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->cipher_iv.offset);
- rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
- ctx->cipher_iv.length);
- }
-
- if (do_auth) {
- if (ctx->auth_iv.length) {
- uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->auth_iv.offset);
- rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
- ctx->auth_iv.length);
- }
- rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
- ctx->digest_length);
- }
-
- if (do_aead) {
- rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
- ctx->digest_length);
- rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
- ctx->aad_len);
- }
-#endif
- return 0;
-}
-
-static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
-{
- uint32_t div = data >> shift;
- uint32_t mult = div << shift;
-
- return data - mult;
-}
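/* Usage sketch: 'shift' is log2 of the ring size in bytes, making
 * adf_modulo() a division-free power-of-two modulo. For a 1024-byte
 * ring (queue->modulo = 10) with 64-byte messages,
 * adf_modulo(960 + 64, 10) == 0: the tail wraps from the last slot
 * back to the start of the ring.
 */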
-
-int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
- __rte_unused struct rte_cryptodev_config *config)
-{
- PMD_INIT_FUNC_TRACE();
- return 0;
-}
-
-int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
-{
- PMD_INIT_FUNC_TRACE();
- return 0;
-}
-
-void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
-int qat_dev_close(struct rte_cryptodev *dev)
-{
- int i, ret;
-
- PMD_INIT_FUNC_TRACE();
-
- for (i = 0; i < dev->data->nb_queue_pairs; i++) {
- ret = qat_crypto_sym_qp_release(dev, i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-void qat_dev_info_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_info *info)
-{
- struct qat_pmd_private *internals = dev->data->dev_private;
-
- PMD_INIT_FUNC_TRACE();
- if (info != NULL) {
- info->max_nb_queue_pairs =
- ADF_NUM_SYM_QPS_PER_BUNDLE *
- ADF_NUM_BUNDLES_PER_DEV;
- info->feature_flags = dev->feature_flags;
- info->capabilities = internals->qat_dev_capabilities;
- info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->driver_id = cryptodev_qat_driver_id;
- info->pci_dev = RTE_DEV_TO_PCI(dev->device);
- }
-}
-
-void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_stats *stats)
-{
- int i;
- struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
-
- PMD_INIT_FUNC_TRACE();
- if (stats == NULL) {
- PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
- return;
- }
- for (i = 0; i < dev->data->nb_queue_pairs; i++) {
- if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
- continue;
- }
-
- stats->enqueued_count += qp[i]->stats.enqueued_count;
- stats->dequeued_count += qp[i]->stats.dequeued_count;
- stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
- stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
- }
-}
-
-void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
-{
- int i;
- struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
-
- PMD_INIT_FUNC_TRACE();
- for (i = 0; i < dev->data->nb_queue_pairs; i++)
- memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
- PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
-}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
deleted file mode 100644
index 281a142b..00000000
--- a/drivers/crypto/qat/qat_crypto.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_CRYPTO_H_
-#define _QAT_CRYPTO_H_
-
-#include <rte_cryptodev_pmd.h>
-#include <rte_memzone.h>
-
-#include "qat_crypto_capabilities.h"
-
-#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
-/**< Intel QAT Symmetric Crypto PMD device name */
-
-/*
- * This macro rounds up a number to be a multiple of
- * the alignment when the alignment is a power of 2
- */
-#define ALIGN_POW2_ROUNDUP(num, align) \
- (((num) + (align) - 1) & ~((align) - 1))
-#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
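/* Editor's arithmetic example: ALIGN_POW2_ROUNDUP(100, 64) evaluates
 * to (100 + 63) & ~63 = 163 & ~63 = 128, rounding up; conversely
 * 'addr & QAT_64_BTYE_ALIGN_MASK' rounds an address down to the
 * previous 64-byte boundary, as used for in-place DMA start addresses
 * in qat_crypto.c.
 */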
-
-#define QAT_CSR_HEAD_WRITE_THRESH 32U
-/* number of requests to accumulate before writing head CSR */
-#define QAT_CSR_TAIL_WRITE_THRESH 32U
-/* number of requests to accumulate before writing tail CSR */
-#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
-/* number of inflights below which no tail write coalescing should occur */
-
-struct qat_session;
-
-enum qat_device_gen {
- QAT_GEN1 = 1,
- QAT_GEN2,
-};
-
-/**
- * Structure associated with each queue.
- */
-struct qat_queue {
- char memz_name[RTE_MEMZONE_NAMESIZE];
- void *base_addr; /* Base address */
- rte_iova_t base_phys_addr; /* Queue physical address */
- uint32_t head; /* Shadow copy of the head */
- uint32_t tail; /* Shadow copy of the tail */
- uint32_t modulo;
- uint32_t msg_size;
- uint16_t max_inflights;
- uint32_t queue_size;
- uint8_t hw_bundle_number;
- uint8_t hw_queue_number;
- /* HW queue aka ring offset on bundle */
- uint32_t csr_head; /* last written head value */
- uint32_t csr_tail; /* last written tail value */
- uint16_t nb_processed_responses;
- /* number of responses processed since last CSR head write */
- uint16_t nb_pending_requests;
- /* number of requests pending since last CSR tail write */
-};
-
-struct qat_qp {
- void *mmap_bar_addr;
- uint16_t inflights16;
- struct qat_queue tx_q;
- struct qat_queue rx_q;
- struct rte_cryptodev_stats stats;
- struct rte_mempool *op_cookie_pool;
- void **op_cookies;
- uint32_t nb_descriptors;
- enum qat_device_gen qat_dev_gen;
-} __rte_cache_aligned;
-
-/** private data structure for each QAT device */
-struct qat_pmd_private {
- unsigned max_nb_queue_pairs;
- /**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
- enum qat_device_gen qat_dev_gen;
- /**< QAT device generation */
- const struct rte_cryptodev_capabilities *qat_dev_capabilities;
-};
-
-extern uint8_t cryptodev_qat_driver_id;
-
-int qat_dev_config(struct rte_cryptodev *dev,
- struct rte_cryptodev_config *config);
-int qat_dev_start(struct rte_cryptodev *dev);
-void qat_dev_stop(struct rte_cryptodev *dev);
-int qat_dev_close(struct rte_cryptodev *dev);
-void qat_dev_info_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_info *info);
-
-void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_stats *stats);
-void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev);
-
-int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
- const struct rte_cryptodev_qp_conf *rx_conf, int socket_id,
- struct rte_mempool *session_pool);
-int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
- uint16_t queue_pair_id);
-
-int
-qat_pmd_session_mempool_create(struct rte_cryptodev *dev,
- unsigned nb_objs, unsigned obj_cache_size, int socket_id);
-
-extern unsigned
-qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev);
-
-extern int
-qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct rte_cryptodev_sym_session *sess,
- struct rte_mempool *mempool);
-
-
-int
-qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private);
-
-int
-qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
-
-int
-qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
-
-int
-qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
-
-
-extern void
-qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
- struct rte_cryptodev_sym_session *session);
-
-extern uint16_t
-qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
-
-extern uint16_t
-qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
-
-#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/qat_logs.h b/drivers/crypto/qat/qat_logs.h
deleted file mode 100644
index c9144bf6..00000000
--- a/drivers/crypto/qat/qat_logs.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#ifndef _QAT_LOGS_H_
-#define _QAT_LOGS_H_
-
-#define PMD_INIT_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
- "PMD: %s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX_FREE
-#define PMD_TX_FREE_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
-
-#endif /* _QAT_LOGS_H_ */
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
deleted file mode 100644
index 7fea10c7..00000000
--- a/drivers/crypto/qat/qat_qp.c
+++ /dev/null
@@ -1,451 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_memzone.h>
-#include <rte_cryptodev_pmd.h>
-#include <rte_pci.h>
-#include <rte_bus_pci.h>
-#include <rte_atomic.h>
-#include <rte_prefetch.h>
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_algs.h"
-#include "adf_transport_access_macros.h"
-
-#define ADF_MAX_SYM_DESC 4096
-#define ADF_MIN_SYM_DESC 128
-#define ADF_SYM_TX_RING_DESC_SIZE 128
-#define ADF_SYM_RX_RING_DESC_SIZE 32
-#define ADF_SYM_TX_QUEUE_STARTOFF 2
-/* Offset from bundle start to 1st Sym Tx queue */
-#define ADF_SYM_RX_QUEUE_STARTOFF 10
-#define ADF_ARB_REG_SLOT 0x1000
-#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
-
-#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
- ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
- (ADF_ARB_REG_SLOT * index), value)
-
-static int qat_qp_check_queue_alignment(uint64_t phys_addr,
- uint32_t queue_size_bytes);
-static int qat_tx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
- int socket_id);
-static int qat_rx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
- int socket_id);
-static void qat_queue_delete(struct qat_queue *queue);
-static int qat_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
- int socket_id);
-static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
- uint32_t *queue_size_for_csr);
-static void adf_configure_queues(struct qat_qp *queue);
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
-
-static const struct rte_memzone *
-queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
- int socket_id)
-{
- const struct rte_memzone *mz;
-
- PMD_INIT_FUNC_TRACE();
- mz = rte_memzone_lookup(queue_name);
- if (mz != 0) {
- if (((size_t)queue_size <= mz->len) &&
- ((socket_id == SOCKET_ID_ANY) ||
- (socket_id == mz->socket_id))) {
- PMD_DRV_LOG(DEBUG, "re-use memzone already "
- "allocated for %s", queue_name);
- return mz;
- }
-
- PMD_DRV_LOG(ERR, "Incompatible memzone already "
- "allocated %s, size %u, socket %d. "
- "Requested size %u, socket %u",
- queue_name, (uint32_t)mz->len,
- mz->socket_id, queue_size, socket_id);
- return NULL;
- }
-
- PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
- queue_name, queue_size, socket_id);
- return rte_memzone_reserve_aligned(queue_name, queue_size,
- socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
-}
-
-int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
- const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id, struct rte_mempool *session_pool __rte_unused)
-{
- struct qat_qp *qp;
- struct rte_pci_device *pci_dev;
- int ret;
- char op_cookie_pool_name[RTE_RING_NAMESIZE];
- uint32_t i;
-
- PMD_INIT_FUNC_TRACE();
-
- /* If qp is already in use free ring memory and qp metadata. */
- if (dev->data->queue_pairs[queue_pair_id] != NULL) {
- ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
- if (ret < 0)
- return ret;
- }
-
- if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
- (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
- PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
- qp_conf->nb_descriptors);
- return -EINVAL;
- }
-
- pci_dev = RTE_DEV_TO_PCI(dev->device);
-
- if (pci_dev->mem_resource[0].addr == NULL) {
- PMD_DRV_LOG(ERR, "Could not find VF config space "
- "(UIO driver attached?).");
- return -EINVAL;
- }
-
- if (queue_pair_id >=
- (ADF_NUM_SYM_QPS_PER_BUNDLE *
- ADF_NUM_BUNDLES_PER_DEV)) {
- PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
- queue_pair_id);
- return -EINVAL;
- }
- /* Allocate the queue pair data structure. */
- qp = rte_zmalloc("qat PMD qp metadata",
- sizeof(*qp), RTE_CACHE_LINE_SIZE);
- if (qp == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
- return -ENOMEM;
- }
- qp->nb_descriptors = qp_conf->nb_descriptors;
- qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
- qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
- RTE_CACHE_LINE_SIZE);
- if (qp->op_cookies == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
- rte_free(qp);
- return -ENOMEM;
- }
-
- qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
- qp->inflights16 = 0;
-
- if (qat_tx_queue_create(dev, &(qp->tx_q),
- queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
- PMD_INIT_LOG(ERR, "Tx queue create failed "
- "queue_pair_id=%u", queue_pair_id);
- goto create_err;
- }
-
- if (qat_rx_queue_create(dev, &(qp->rx_q),
- queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
- PMD_DRV_LOG(ERR, "Rx queue create failed "
- "queue_pair_id=%hu", queue_pair_id);
- qat_queue_delete(&(qp->tx_q));
- goto create_err;
- }
-
- adf_configure_queues(qp);
- adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
- snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
- pci_dev->driver->driver.name, dev->data->dev_id,
- queue_pair_id);
-
- qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
- if (qp->op_cookie_pool == NULL)
- qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
- qp->nb_descriptors,
- sizeof(struct qat_crypto_op_cookie), 64, 0,
- NULL, NULL, NULL, NULL, socket_id,
- 0);
- if (!qp->op_cookie_pool) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
- " op mempool");
- goto create_err;
- }
-
- for (i = 0; i < qp->nb_descriptors; i++) {
- if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
- goto create_err;
- }
-
- struct qat_crypto_op_cookie *sql_cookie =
- qp->op_cookies[i];
-
- sql_cookie->qat_sgl_src_phys_addr =
- rte_mempool_virt2iova(sql_cookie) +
- offsetof(struct qat_crypto_op_cookie,
- qat_sgl_list_src);
-
- sql_cookie->qat_sgl_dst_phys_addr =
- rte_mempool_virt2iova(sql_cookie) +
- offsetof(struct qat_crypto_op_cookie,
- qat_sgl_list_dst);
- }
-
- struct qat_pmd_private *internals
- = dev->data->dev_private;
- qp->qat_dev_gen = internals->qat_dev_gen;
-
- dev->data->queue_pairs[queue_pair_id] = qp;
- return 0;
-
-create_err:
- if (qp->op_cookie_pool)
- rte_mempool_free(qp->op_cookie_pool);
- rte_free(qp->op_cookies);
- rte_free(qp);
- return -EFAULT;
-}
-
-int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
-{
- struct qat_qp *qp =
- (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
- uint32_t i;
-
- PMD_INIT_FUNC_TRACE();
- if (qp == NULL) {
- PMD_DRV_LOG(DEBUG, "qp already freed");
- return 0;
- }
-
- /* Don't free memory if there are still responses to be processed */
- if (qp->inflights16 == 0) {
- qat_queue_delete(&(qp->tx_q));
- qat_queue_delete(&(qp->rx_q));
- } else {
- return -EAGAIN;
- }
-
- adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
-
- for (i = 0; i < qp->nb_descriptors; i++)
- rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
-
- if (qp->op_cookie_pool)
- rte_mempool_free(qp->op_cookie_pool);
-
- rte_free(qp->op_cookies);
- rte_free(qp);
- dev->data->queue_pairs[queue_pair_id] = NULL;
- return 0;
-}
-
-static int qat_tx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t qp_id,
- uint32_t nb_desc, int socket_id)
-{
- PMD_INIT_FUNC_TRACE();
- queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
- queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
- ADF_SYM_TX_QUEUE_STARTOFF;
- PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
- nb_desc, qp_id, queue->hw_bundle_number,
- queue->hw_queue_number);
-
- return qat_queue_create(dev, queue, nb_desc,
- ADF_SYM_TX_RING_DESC_SIZE, socket_id);
-}
-
-static int qat_rx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
- int socket_id)
-{
- PMD_INIT_FUNC_TRACE();
- queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
- queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
- ADF_SYM_RX_QUEUE_STARTOFF;
-
- PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
- nb_desc, qp_id, queue->hw_bundle_number,
- queue->hw_queue_number);
- return qat_queue_create(dev, queue, nb_desc,
- ADF_SYM_RX_RING_DESC_SIZE, socket_id);
-}
-
-static void qat_queue_delete(struct qat_queue *queue)
-{
- const struct rte_memzone *mz;
- int status = 0;
-
- if (queue == NULL) {
- PMD_DRV_LOG(DEBUG, "Invalid queue");
- return;
- }
- mz = rte_memzone_lookup(queue->memz_name);
- if (mz != NULL) {
- /* Write an unused pattern to the queue memory. */
- memset(queue->base_addr, 0x7F, queue->queue_size);
- status = rte_memzone_free(mz);
- if (status != 0)
- PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
- status, queue->memz_name);
- } else {
- PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
- queue->memz_name);
- }
-}
-
-static int
-qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
- uint32_t nb_desc, uint8_t desc_size, int socket_id)
-{
- uint64_t queue_base;
- void *io_addr;
- const struct rte_memzone *qp_mz;
- uint32_t queue_size_bytes = nb_desc*desc_size;
- struct rte_pci_device *pci_dev;
-
- PMD_INIT_FUNC_TRACE();
- if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
- PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
- return -EINVAL;
- }
-
- pci_dev = RTE_DEV_TO_PCI(dev->device);
-
- /*
- * Allocate a memzone for the queue - create a unique name.
- */
- snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
- pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
- queue->hw_bundle_number, queue->hw_queue_number);
- qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
- socket_id);
- if (qp_mz == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
- return -ENOMEM;
- }
-
- queue->base_addr = (char *)qp_mz->addr;
- queue->base_phys_addr = qp_mz->iova;
- if (qat_qp_check_queue_alignment(queue->base_phys_addr,
- queue_size_bytes)) {
- PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
- " 0x%"PRIx64"\n",
- queue->base_phys_addr);
- return -EFAULT;
- }
-
- if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
- != 0) {
- PMD_DRV_LOG(ERR, "Invalid num inflights");
- return -EINVAL;
- }
-
- queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
- ADF_BYTES_TO_MSG_SIZE(desc_size));
- queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
- PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
- " msg_size %u, max_inflights %u modulo %u",
- queue->queue_size, queue_size_bytes,
- nb_desc, desc_size, queue->max_inflights,
- queue->modulo);
-
- if (queue->max_inflights < 2) {
- PMD_DRV_LOG(ERR, "Invalid num inflights");
- return -EINVAL;
- }
- queue->head = 0;
- queue->tail = 0;
- queue->msg_size = desc_size;
-
- /*
- * Write an unused pattern to the queue memory.
- */
- memset(queue->base_addr, 0x7F, queue_size_bytes);
-
- queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
- queue->queue_size);
-
- io_addr = pci_dev->mem_resource[0].addr;
-
- WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
- queue->hw_queue_number, queue_base);
- return 0;
-}
-
-static int qat_qp_check_queue_alignment(uint64_t phys_addr,
- uint32_t queue_size_bytes)
-{
- PMD_INIT_FUNC_TRACE();
- if (((queue_size_bytes - 1) & phys_addr) != 0)
- return -EINVAL;
- return 0;
-}
-
-static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
- uint32_t *p_queue_size_for_csr)
-{
- uint8_t i = ADF_MIN_RING_SIZE;
-
- PMD_INIT_FUNC_TRACE();
- for (; i <= ADF_MAX_RING_SIZE; i++)
- if ((msg_size * msg_num) ==
- (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
- *p_queue_size_for_csr = i;
- return 0;
- }
- PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
- return -EINVAL;
-}
-
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
-{
- uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
- (ADF_ARB_REG_SLOT *
- txq->hw_bundle_number);
- uint32_t value;
-
- PMD_INIT_FUNC_TRACE();
- value = ADF_CSR_RD(base_addr, arb_csr_offset);
- value |= (0x01 << txq->hw_queue_number);
- ADF_CSR_WR(base_addr, arb_csr_offset, value);
-}
-
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
-{
- uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
- (ADF_ARB_REG_SLOT *
- txq->hw_bundle_number);
- uint32_t value;
-
- PMD_INIT_FUNC_TRACE();
- value = ADF_CSR_RD(base_addr, arb_csr_offset);
- value ^= (0x01 << txq->hw_queue_number);
- ADF_CSR_WR(base_addr, arb_csr_offset, value);
-}
-
-static void adf_configure_queues(struct qat_qp *qp)
-{
- uint32_t queue_config;
- struct qat_queue *queue = &qp->tx_q;
-
- PMD_INIT_FUNC_TRACE();
- queue_config = BUILD_RING_CONFIG(queue->queue_size);
-
- WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
- queue->hw_queue_number, queue_config);
-
- queue = &qp->rx_q;
- queue_config =
- BUILD_RESP_RING_CONFIG(queue->queue_size,
- ADF_RING_NEAR_WATERMARK_512,
- ADF_RING_NEAR_WATERMARK_0);
-
- WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
- queue->hw_queue_number, queue_config);
-}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
new file mode 100644
index 00000000..10cdf2e1
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym.c
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <openssl/evp.h>
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_crypto_sym.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+
+#include "qat_sym.h"
+
+/** Decrypt a single partial block.
+ * Depends on OpenSSL libcrypto.
+ * Uses ECB mode plus XOR to implement CFB decryption: same result,
+ * better performance (a standalone sketch of the trick follows this
+ * function).
+ */
+static inline int
+bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR it with the ciphertext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_decrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_decrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+ return -EINVAL;
+}
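
The ECB-plus-XOR trick above is CFB unrolled by hand: CFB always runs the block cipher in the forward direction and XORs its output with the data, so one EVP_EncryptUpdate() on the IV plus a byte-wise XOR reproduces a one-block CFB operation. A minimal standalone sketch, assuming only OpenSSL libcrypto; the function name and values are illustrative, not part of the PMD:

    #include <stdint.h>
    #include <openssl/evp.h>

    /* One-block CFB decrypt expressed as ECB-encrypt(IV) XOR ciphertext.
     * Hypothetical demo only.
     */
    static int
    cfb_via_ecb_demo(const uint8_t key[16], const uint8_t iv[16],
            const uint8_t *ct, uint8_t *pt, int len)
    {
        uint8_t keystream[16];
        int outl, i, ret = -1;
        EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();

        if (ctx == NULL)
            return -1;
        /* Forward (encrypt) direction even though we are decrypting */
        if (EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL) == 1 &&
                EVP_EncryptUpdate(ctx, keystream, &outl, iv, 16) > 0) {
            for (i = 0; i < len && i < 16; i++)
                pt[i] = ct[i] ^ keystream[i]; /* P = C ^ E_k(IV) */
            ret = 0;
        }
        EVP_CIPHER_CTX_free(ctx);
        return ret;
    }
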
+
+
+static inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+
+ /* Decrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = last_block - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_decrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+ dst, last_block_len);
+#endif
+ }
+
+ return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline void
+set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
+ /* copy IV into request if it fits */
+ if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset),
+ iv_length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ rte_crypto_op_ctophys_offset(op,
+ iv_offset);
+ }
+}
+
+/** Setting the IV for CCM is a special case: the 0th byte is set to
+ * q-1, where q is the size of the CCM message-length field, i.e. the
+ * bytes left in the 16-byte block after the flags byte and the nonce
+ * (the flags arithmetic is sketched after this function)
+ */
+static inline void
+set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+{
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+}
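
The q - ICP_QAT_HW_CCM_NONCE_OFFSET store above matches RFC 3610's counter-block convention of putting L' = q-1 into byte 0, assuming the nonce offset constant is 1. The B0 flags octet built later via ICP_QAT_HW_CCM_BUILD_B0_FLAGS follows the same RFC: 64*Adata + 8*M' + L', with M' = (digest length - 2)/2 and L' = q-1. A hedged sketch of that arithmetic; the QAT macro presumably encodes the same fields:

    #include <stdint.h>

    /* RFC 3610 B0 flags octet; illustrative only. */
    static uint8_t
    ccm_b0_flags(int has_aad, uint8_t digest_len, uint8_t q)
    {
        return (has_aad ? 0x40 : 0x00)                   /* Adata bit */
            | (uint8_t)(((digest_len - 2) / 2) << 3)     /* M' field  */
            | (uint8_t)(q - 1);                          /* L' field  */
    }
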
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen)
+{
+ int ret = 0;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ register struct icp_qat_fw_la_bulk_req *qat_req;
+ uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ uint32_t auth_len = 0, auth_ofs = 0;
+ uint32_t min_ofs = 0;
+ uint64_t src_buf_start = 0, dst_buf_start = 0;
+ uint8_t do_sgl = 0;
+ struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+ struct qat_sym_op_cookie *cookie =
+ (struct qat_sym_op_cookie *)op_cookie;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
+ return -EINVAL;
+ }
+
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
+ " requests, op (%p) is sessionless.", op);
+ return -EINVAL;
+ }
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_qat_driver_id);
+
+ if (unlikely(ctx == NULL)) {
+ QAT_DP_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
+ QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
+ qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+ cipher_param = (void *)&qat_req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+ if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ /* AES-GCM or AES-CCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+ do_aead = 1;
+ } else {
+ do_auth = 1;
+ do_cipher = 1;
+ }
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ do_auth = 1;
+ do_cipher = 0;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ do_auth = 0;
+ do_cipher = 1;
+ }
+
+ if (do_cipher) {
+
+ if (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+
+ if (unlikely(
+ (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+ (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
+ "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ cipher_len = op->sym->cipher.data.length >> 3;
+ cipher_ofs = op->sym->cipher.data.offset >> 3;
+
+ } else if (ctx->bpi_ctx) {
+		} else if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to the device.
+			 * Process any partial block using CFB mode.
+			 * Even with 0 complete blocks, still send the op to
+			 * the device so it reaches the rx queue for
+			 * post-processing and dequeuing
+			 */
+ cipher_len = qat_bpicipher_preprocess(ctx, op);
+ cipher_ofs = op->sym->cipher.data.offset;
+ } else {
+ cipher_len = op->sym->cipher.data.length;
+ cipher_ofs = op->sym->cipher.data.offset;
+ }
+
+ set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+ min_ofs = cipher_ofs;
+ }
+
+ if (do_auth) {
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+ if (unlikely(
+ (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+ (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ auth_ofs = op->sym->auth.data.offset >> 3;
+ auth_len = op->sym->auth.data.length >> 3;
+
+ auth_param->u1.aad_adr =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /* AES-GMAC */
+ set_cipher_iv(ctx->auth_iv.length,
+ ctx->auth_iv.offset,
+ cipher_param, op, qat_req);
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ auth_param->u1.aad_adr = 0;
+ auth_param->u2.aad_sz = 0;
+
+			/*
+			 * If the IV is 12 bytes the firmware computes J0 itself
+			 */
+ if (ctx->auth_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+ }
+ } else {
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ }
+ min_ofs = auth_ofs;
+
+ if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+ auth_param->auth_res_addr =
+ op->sym->auth.digest.phys_addr;
+
+ }
+
+ if (do_aead) {
+		/*
+		 * This address may be overridden below to point at the IV
+		 * offset within the op instead of the AAD buffer (the CCM
+		 * no-AAD case)
+		 */
+ rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/*
+			 * If the IV is 12 bytes the firmware computes J0 itself
+			 */
+ if (ctx->cipher_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ }
+ set_cipher_iv(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+			/* In case of AES-CCM this may point to user-selected
+			 * memory or to the IV offset in the crypto_op
+			 */
+			uint8_t *aad_data = op->sym->aead.aad.data;
+			/* This is the true AAD length; it does not include
+			 * the 18 bytes of preceding data (B0 block and AAD
+			 * length field)
+			 */
+ uint8_t aad_ccm_real_len = 0;
+ uint8_t aad_len_field_sz = 0;
+ uint32_t msg_len_be =
+ rte_bswap32(op->sym->aead.data.length);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+				/*
+				 * aad_len is not greater than 18, so there is
+				 * no actual AAD data; use the IV area after
+				 * the op for the B0 block instead
+				 */
+ aad_data = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ aad_phys_addr_aead =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+ }
+
+ uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
+ ctx->cipher_iv.length;
+
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+ aad_len_field_sz,
+ ctx->digest_length, q);
+
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
+ = rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx],
+ 0, pad_len);
+ }
+
+ }
+
+ set_cipher_iv_ccm(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, q,
+ aad_len_field_sz);
+
+ }
+
+ cipher_len = op->sym->aead.data.length;
+ cipher_ofs = op->sym->aead.data.offset;
+ auth_len = op->sym->aead.data.length;
+ auth_ofs = op->sym->aead.data.offset;
+
+ auth_param->u1.aad_adr = aad_phys_addr_aead;
+ auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
+ min_ofs = op->sym->aead.data.offset;
+ }
+
+ if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+ do_sgl = 1;
+
+ /* adjust for chain case */
+ if (do_cipher && do_auth)
+ min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+
+ if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+ min_ofs = 0;
+
+ if (unlikely(op->sym->m_dst != NULL)) {
+ /* Out-of-place operation (OOP)
+ * Don't align DMA start. DMA the minimum data-set
+ * so as not to overwrite data in dest buffer
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
+ dst_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
+
+ } else {
+		/* In-place operation.
+		 * Start DMA at the nearest aligned address below min_ofs;
+		 * a worked example of the arithmetic follows this function.
+		 */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
+ & QAT_64_BTYE_ALIGN_MASK;
+
+ if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
+ rte_pktmbuf_headroom(op->sym->m_src))
+ > src_buf_start)) {
+ /* alignment has pushed addr ahead of start of mbuf
+ * so revert and take the performance hit
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src,
+ min_ofs);
+ }
+ dst_buf_start = src_buf_start;
+ }
+
+ if (do_cipher || do_aead) {
+ cipher_param->cipher_offset =
+ (uint32_t)rte_pktmbuf_iova_offset(
+ op->sym->m_src, cipher_ofs) - src_buf_start;
+ cipher_param->cipher_length = cipher_len;
+ } else {
+ cipher_param->cipher_offset = 0;
+ cipher_param->cipher_length = 0;
+ }
+
+ if (do_auth || do_aead) {
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
+ op->sym->m_src, auth_ofs) - src_buf_start;
+ auth_param->auth_len = auth_len;
+ } else {
+ auth_param->auth_off = 0;
+ auth_param->auth_len = 0;
+ }
+
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ > (auth_param->auth_off + auth_param->auth_len) ?
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ : (auth_param->auth_off + auth_param->auth_len);
+
+ if (do_sgl) {
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ ret = qat_sgl_fill_array(op->sym->m_src,
+ (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
+ &cookie->qat_sgl_src,
+ qat_req->comn_mid.src_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ if (likely(op->sym->m_dst == NULL))
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ else {
+ ret = qat_sgl_fill_array(op->sym->m_dst,
+ (int64_t)(dst_buf_start -
+ rte_pktmbuf_iova(op->sym->m_dst)),
+ &cookie->qat_sgl_dst,
+ qat_req->comn_mid.dst_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
+ return ret;
+ }
+
+ qat_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ qat_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ }
+ } else {
+ qat_req->comn_mid.src_data_addr = src_buf_start;
+ qat_req->comn_mid.dest_data_addr = dst_buf_start;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (do_cipher) {
+ uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
+ ctx->cipher_iv.length);
+ }
+
+ if (do_auth) {
+ if (ctx->auth_iv.length) {
+ uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->auth_iv.offset);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
+ ctx->auth_iv.length);
+ }
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
+ ctx->digest_length);
+ }
+
+ if (do_aead) {
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+ ctx->digest_length);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
+ ctx->aad_len);
+ }
+#endif
+ return 0;
+}
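
The in-place branch rounds the DMA start down to a 64-byte boundary (QAT_64_BTYE_ALIGN_MASK presumably clears the low six bits) and lets cipher_offset/auth_off absorb the difference. A worked example with hypothetical addresses:

    /*
     * IOVA of data at min_ofs : 0x7f2a1046
     * src_buf_start           : 0x7f2a1046 & ~0x3f = 0x7f2a1040
     * cipher_offset           : 0x7f2a1046 - 0x7f2a1040 = 6
     *
     * The device DMAs from the aligned address; the 6-byte
     * cipher_offset inside the request skips the extra prefix.
     */
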
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
new file mode 100644
index 00000000..bc6426c3
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_H_
+#define _QAT_SYM_H_
+
+#include <rte_cryptodev_pmd.h>
+
+#ifdef BUILD_QAT_SYM
+#include <openssl/evp.h>
+
+#include "qat_common.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+#include "qat_logs.h"
+
+#define BYTE_LENGTH 8
+/* BPI is only used for partial blocks of DES and AES,
+ * so the AES block length can be assumed as the maximum
+ * length for IV, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER 16
+
+struct qat_sym_session;
+
+struct qat_sym_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_op_cookie {
+ struct qat_sym_sgl qat_sgl_src;
+ struct qat_sym_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen);
+
+
+/** Encrypt a single partial block.
+ * Depends on OpenSSL libcrypto.
+ * Uses ECB mode plus XOR to implement CFB encryption: same result,
+ * better performance.
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+ /* ECB method: encrypt the IV, then XOR this with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_encrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_encrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+ return -EINVAL;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len > 0 &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+ /* Encrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset;
+
+ last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = dst - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst before post-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_encrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst after post-process:",
+ dst, last_block_len);
+#endif
+ }
+ return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline void
+qat_sym_process_response(void **op, uint8_t *resp)
+{
+
+ struct icp_qat_fw_comn_resp *resp_msg =
+ (struct icp_qat_fw_comn_resp *)resp;
+ struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+ if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status)) {
+
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ struct qat_sym_session *sess = (struct qat_sym_session *)
+ get_sym_session_private_data(
+ rx_op->sym->session,
+ cryptodev_qat_driver_id);
+
+
+ if (sess->bpi_ctx)
+ qat_bpicipher_postprocess(sess, rx_op);
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ *op = (void *)rx_op;
+}
+#else
+
+static inline void
+qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+{
+}
+#endif
+#endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_sym_capabilities.h
index 001c32c5..eea08bc7 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_sym_capabilities.h
@@ -2,8 +2,8 @@
* Copyright(c) 2017-2018 Intel Corporation
*/
-#ifndef _QAT_CRYPTO_CAPABILITIES_H_
-#define _QAT_CRYPTO_CAPABILITIES_H_
+#ifndef _QAT_SYM_CAPABILITIES_H_
+#define _QAT_SYM_CAPABILITIES_H_
#define QAT_BASE_GEN1_SYM_CAPABILITIES \
{ /* SHA1 HMAC */ \
@@ -434,7 +434,7 @@
.algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
.block_size = 8, \
.key_size = { \
- .min = 16, \
+ .min = 8, \
.max = 24, \
.increment = 8 \
}, \
@@ -554,4 +554,4 @@
}, } \
}
-#endif /* _QAT_CRYPTO_CAPABILITIES_H_ */
+#endif /* _QAT_SYM_CAPABILITIES_H_ */
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
new file mode 100644
index 00000000..96f442e8
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_pci.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_logs.h"
+#include "qat_sym.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+
+uint8_t cryptodev_qat_driver_id;
+
+static const struct rte_cryptodev_capabilities qat_gen1_sym_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities qat_gen2_sym_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ QAT_EXTRA_GEN2_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+
+static int qat_sym_dev_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+static int qat_sym_dev_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static void qat_sym_dev_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return;
+}
+
+static int qat_sym_dev_close(struct rte_cryptodev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_sym_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qat_sym_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qat_gen_config[internals->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = internals->qat_dev_capabilities;
+ info->driver_id = cryptodev_qat_driver_id;
+		/* No limit on the number of sessions */
+ info->sym.max_nb_sessions = 0;
+ }
+}
+
+static void qat_sym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct qat_common_stats qat_stats = {0};
+ struct qat_sym_dev_private *qat_priv;
+
+ if (stats == NULL || dev == NULL) {
+ QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_SYMMETRIC);
+ stats->enqueued_count = qat_stats.enqueued_count;
+ stats->dequeued_count = qat_stats.dequeued_count;
+ stats->enqueue_err_count = qat_stats.enqueue_err_count;
+ stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void qat_sym_stats_reset(struct rte_cryptodev *dev)
+{
+ struct qat_sym_dev_private *qat_priv;
+
+ if (dev == NULL) {
+ QAT_LOG(ERR, "invalid cryptodev ptr %p", dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_SYMMETRIC);
+
+}
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+
+ QAT_LOG(DEBUG, "Release sym qp %u on device %d",
+ queue_pair_id, dev->data->dev_id);
+
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][queue_pair_id]
+ = NULL;
+
+ return qat_qp_release((struct qat_qp **)
+ &(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool __rte_unused)
+{
+ struct qat_qp *qp;
+ int ret = 0;
+ uint32_t i;
+ struct qat_qp_config qat_qp_conf;
+
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qat_gen_config[qat_private->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+ const struct qat_qp_hw_data *qp_hw_data = sym_hw_qps + qp_id;
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (*qp_addr != NULL) {
+ ret = qat_sym_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+ if (qp_id >= qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC)) {
+ QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+ return -EINVAL;
+ }
+
+ qat_qp_conf.hw = qp_hw_data;
+ qat_qp_conf.build_request = qat_sym_build_request;
+ qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie);
+ qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
+ qat_qp_conf.socket_id = socket_id;
+ qat_qp_conf.service_str = "sym";
+
+ ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
+ if (ret != 0)
+ return ret;
+
+ /* store a link to the qp in the qat_pci_device */
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id]
+ = *qp_addr;
+
+ qp = (struct qat_qp *)*qp_addr;
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_dst);
+ }
+
+ return ret;
+}
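
The cookie loop above precomputes each SGL's physical address once at setup time: the IOVA of a member embedded in a mempool object is the object's IOVA plus the member's offset, so the data path never has to translate addresses. The same pattern in isolation, with a hypothetical struct:

    #include <stddef.h>
    #include <stdint.h>
    #include <rte_mempool.h>

    /* Hypothetical cookie; mirrors the offsetof() arithmetic above. */
    struct example_cookie {
        uint64_t hdr;
        uint8_t sgl[64];
    };

    static rte_iova_t
    example_sgl_iova(struct example_cookie *c)
    {
        return rte_mempool_virt2iova(c) +
            offsetof(struct example_cookie, sgl);
    }
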
+
+static struct rte_cryptodev_ops crypto_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_sym_dev_config,
+ .dev_start = qat_sym_dev_start,
+ .dev_stop = qat_sym_dev_stop,
+ .dev_close = qat_sym_dev_close,
+ .dev_infos_get = qat_sym_dev_info_get,
+
+ .stats_get = qat_sym_stats_get,
+ .stats_reset = qat_sym_stats_reset,
+ .queue_pair_setup = qat_sym_qp_setup,
+ .queue_pair_release = qat_sym_qp_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .sym_session_get_size = qat_sym_session_get_private_size,
+ .sym_session_configure = qat_sym_session_configure,
+ .sym_session_clear = qat_sym_session_clear
+};
+
+static uint16_t
+qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+}
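
These two wrappers only adapt the cryptodev burst signature onto the generic qat_enqueue_op_burst()/qat_dequeue_op_burst() in the common QAT layer; an application reaches them through the standard burst API. A rough usage sketch; device id 0, queue pair 0 and a prepared ops[] array are assumptions:

    #include <rte_cryptodev.h>

    /* Hypothetical polling loop over queue pair 0 of cryptodev 0. */
    static void
    poll_qp0(struct rte_crypto_op **ops, uint16_t n)
    {
        uint16_t sent = rte_cryptodev_enqueue_burst(0, 0, ops, n);
        uint16_t done = 0;

        while (done < sent)
            done += rte_cryptodev_dequeue_burst(0, 0,
                    ops + done, sent - done);
    }
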
+
+/* An rte_driver is needed to register both the device and the driver
+ * with cryptodev.
+ * The actual QAT PCI device's rte_driver can't be used, as its name
+ * represents the whole PCI device with all of its services. Think of
+ * this as a holder for a name for the crypto part of the PCI device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+ .name = qat_sym_drv_name,
+ .alias = qat_sym_drv_name
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ .private_data_size = sizeof(struct qat_sym_dev_private)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *cryptodev;
+ struct qat_sym_dev_private *internals;
+
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "sym");
+ QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+ /* Populate subset device to use in cryptodev device creation */
+ qat_pci_dev->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+ qat_pci_dev->sym_rte_dev.numa_node =
+ qat_pci_dev->pci_dev->device.numa_node;
+ qat_pci_dev->sym_rte_dev.devargs = NULL;
+
+ cryptodev = rte_cryptodev_pmd_create(name,
+ &(qat_pci_dev->sym_rte_dev), &init_params);
+
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ qat_pci_dev->sym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = cryptodev_qat_driver_id;
+ cryptodev->dev_ops = &crypto_qat_ops;
+
+ cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ internals = cryptodev->data->dev_private;
+ internals->qat_dev = qat_pci_dev;
+ qat_pci_dev->sym_dev = internals;
+
+ internals->sym_dev_id = cryptodev->data->dev_id;
+ switch (qat_pci_dev->qat_dev_gen) {
+ case QAT_GEN1:
+ internals->qat_dev_capabilities = qat_gen1_sym_capabilities;
+ break;
+ case QAT_GEN2:
+ internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+ break;
+ default:
+ internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+ QAT_LOG(DEBUG,
+ "QAT gen %d capabilities unknown, default to GEN2",
+ qat_pci_dev->qat_dev_gen);
+ break;
+ }
+
+ QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+ cryptodev->data->name, internals->sym_dev_id);
+ return 0;
+}
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+ if (qat_pci_dev->sym_dev == NULL)
+ return 0;
+
+ /* free crypto device */
+ cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->sym_dev_id);
+ rte_cryptodev_pmd_destroy(cryptodev);
+ qat_pci_dev->sym_rte_dev.name = NULL;
+ qat_pci_dev->sym_dev = NULL;
+
+ return 0;
+}
+
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+ cryptodev_qat_sym_driver,
+ cryptodev_qat_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
new file mode 100644
index 00000000..d3432854
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_pmd.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_PMD_H_
+#define _QAT_SYM_PMD_H_
+
+#ifdef BUILD_QAT_SYM
+
+#include <rte_cryptodev.h>
+
+#include "qat_sym_capabilities.h"
+#include "qat_device.h"
+
+/**< Intel(R) QAT Symmetric Crypto PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
+
+extern uint8_t cryptodev_qat_driver_id;
+
+/** Private data structure for a QAT device offering only the symmetric
+ * crypto service. There can be one of these on each qat_pci_device (VF);
+ * in future there may also be private data structures for other services.
+ */
+struct qat_sym_dev_private {
+ struct qat_pci_device *qat_dev;
+ /**< The qat pci device hosting the service */
+ uint8_t sym_dev_id;
+ /**< Device instance for this rte_cryptodev */
+ const struct rte_cryptodev_capabilities *qat_dev_capabilities;
+ /* QAT device symmetric crypto capabilities */
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif
+#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_sym_session.c
index c87ed40f..1d58220a 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,6 +1,12 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
* Copyright(c) 2015-2018 Intel Corporation
*/
+
+#include <openssl/sha.h> /* Needed to calculate pre-compute values */
+#include <openssl/aes.h> /* Needed to calculate pre-compute values */
+#include <openssl/md5.h> /* Needed to calculate pre-compute values */
+#include <openssl/evp.h> /* Needed for bpi runt block processing */
+
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
@@ -9,13 +15,714 @@
#include <rte_malloc.h>
#include <rte_crypto_sym.h>
-#include "../qat_logs.h"
+#include "qat_logs.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
-#include <openssl/sha.h> /* Needed to calculate pre-compute values */
-#include <openssl/aes.h> /* Needed to calculate pre-compute values */
-#include <openssl/md5.h> /* Needed to calculate pre-compute values */
+/** Frees a context previously created by bpi_cipher_ctx_init.
+ * Depends on OpenSSL libcrypto.
+ */
+static void
+bpi_cipher_ctx_free(void *bpi_ctx)
+{
+ if (bpi_ctx != NULL)
+ EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
+}
+
+/** Creates a cipher context for either AES or DES in ECB mode.
+ * Depends on OpenSSL libcrypto.
+ */
+static int
+bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
+ enum rte_crypto_cipher_operation direction __rte_unused,
+ uint8_t *key, void **ctx)
+{
+ const EVP_CIPHER *algo = NULL;
+ int ret;
+ *ctx = EVP_CIPHER_CTX_new();
+
+ if (*ctx == NULL) {
+ ret = -ENOMEM;
+ goto ctx_init_err;
+ }
+
+ if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
+ algo = EVP_des_ecb();
+ else
+ algo = EVP_aes_128_ecb();
+
+	/* The IV will be ECB-encrypted whether the direction is encrypt or decrypt */
+ if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
+ ret = -EINVAL;
+ goto ctx_init_err;
+ }
+
+ return 0;
+
+ctx_init_err:
+ if (*ctx != NULL)
+ EVP_CIPHER_CTX_free(*ctx);
+ return ret;
+}
+
+static int
+qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
+ struct qat_sym_dev_private *internals)
+{
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ continue;
+
+ if (capability->sym.cipher.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
+ struct qat_sym_dev_private *internals)
+{
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ continue;
+
+ if (capability->sym.auth.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+void
+qat_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+ struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
+
+ if (sess_priv) {
+ if (s->bpi_ctx)
+ bpi_cipher_ctx_free(s->bpi_ctx);
+ memset(s, 0, qat_sym_session_get_private_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_AUTH;
+
+ /* AEAD */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		/* AES-GCM and AES-CCM work in opposite orders: GCM
+		 * first encrypts and then generates the hash, whereas
+		 * AES-CCM first generates the hash and then encrypts.
+		 * A similar relation applies to decryption (the
+		 * resulting mapping is tabulated after this function).
+		 */
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ }
+
+ if (xform->next == NULL)
+ return -1;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+
+ return -1;
+}
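
In table form, the AEAD command-id mapping chosen above:

    algorithm   encrypt                        decrypt
    AES-GCM     ICP_QAT_FW_LA_CMD_CIPHER_HASH  ICP_QAT_FW_LA_CMD_HASH_CIPHER
    AES-CCM     ICP_QAT_FW_LA_CMD_HASH_CIPHER  ICP_QAT_FW_LA_CMD_CIPHER_HASH
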
+
+static struct rte_crypto_auth_xform *
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ int ret;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = qat_get_cipher_xform(xform);
+
+ session->cipher_iv.offset = cipher_xform->iv.offset;
+ session->cipher_iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ if (qat_sym_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ if (qat_sym_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid KASUMI cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ if (qat_sym_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ if (qat_sym_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ if (qat_sym_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ ret = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
+ QAT_LOG(ERR, "failed to create DES BPI ctx");
+ goto error_out;
+ }
+ if (qat_sym_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ ret = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
+ QAT_LOG(ERR, "failed to create AES BPI ctx");
+ goto error_out;
+ }
+ if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ if (!qat_is_cipher_alg_supported(
+ cipher_xform->algo, internals)) {
+ QAT_LOG(ERR, "%s not supported on this device",
+ rte_crypto_cipher_algorithm_strings
+ [cipher_xform->algo]);
+ ret = -ENOTSUP;
+ goto error_out;
+ }
+ if (qat_sym_validate_zuc_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid ZUC cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ ret = -ENOTSUP;
+ goto error_out;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ else
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ cipher_xform->key.data,
+ cipher_xform->key.length)) {
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ return 0;
+
+error_out:
+ if (session->bpi_ctx) {
+ bpi_cipher_ctx_free(session->bpi_ctx);
+ session->bpi_ctx = NULL;
+ }
+ return ret;
+}
-#include "qat_algs.h"
+int
+qat_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ QAT_LOG(ERR,
+ "Crypto QAT PMD: failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
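For reference, a minimal sketch of the application-side API flow that lands in this callback through dev_ops (dev_id, sess_mp and xforms are placeholder names, not part of this patch):

	#include <errno.h>
	#include <rte_cryptodev.h>

	/* Illustrative only: the create/init/clear/free sequence that
	 * drives the session callbacks above. */
	static int
	qat_session_lifecycle(uint8_t dev_id, struct rte_mempool *sess_mp,
			struct rte_crypto_sym_xform *xforms)
	{
		struct rte_cryptodev_sym_session *sess =
				rte_cryptodev_sym_session_create(sess_mp);

		if (sess == NULL)
			return -ENOMEM;

		/* Reaches qat_sym_session_configure() via dev_ops. */
		if (rte_cryptodev_sym_session_init(dev_id, sess, xforms,
				sess_mp) != 0) {
			rte_cryptodev_sym_session_free(sess);
			return -EINVAL;
		}

		/* ... build rte_crypto_ops against sess and enqueue ... */

		rte_cryptodev_sym_session_clear(dev_id, sess);
		return rte_cryptodev_sym_session_free(sess);
	}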
+
+int
+qat_sym_session_set_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private)
+{
+ struct qat_sym_session *session = session_private;
+ int ret;
+ int qat_cmd_id;
+
+ /* Set context descriptor physical address */
+ session->cd_paddr = rte_mempool_virt2iova(session) +
+ offsetof(struct qat_sym_session, cd);
+
+ session->min_qat_dev_gen = QAT_GEN1;
+
+ /* Get requested QAT command id */
+ qat_cmd_id = qat_get_cmd_id(xform);
+ if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+ QAT_LOG(ERR, "Unsupported xform chain requested");
+ return -ENOTSUP;
+ }
+ session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+ switch (session->qat_cmd) {
+ case ICP_QAT_FW_LA_CMD_CIPHER:
+ ret = qat_sym_session_configure_cipher(dev, xform, session);
+ if (ret < 0)
+ return ret;
+ break;
+ case ICP_QAT_FW_LA_CMD_AUTH:
+ ret = qat_sym_session_configure_auth(dev, xform, session);
+ if (ret < 0)
+ return ret;
+ break;
+ case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_sym_session_configure_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_sym_session_configure_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_sym_session_configure_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_sym_session_configure_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_sym_session_configure_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_sym_session_configure_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
+ case ICP_QAT_FW_LA_CMD_TRNG_TEST:
+ case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_MGF1:
+ case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_DELIMITER:
+ QAT_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ return -ENOTSUP;
+ default:
+ QAT_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
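As the switch above implies, the command id follows the xform chain order: a cipher xform whose next pointer is an auth xform resolves to ICP_QAT_FW_LA_CMD_CIPHER_HASH, the reverse order to HASH_CIPHER. A minimal sketch of such a chain (aes_key, hmac_key and IV_OFFSET are placeholder assumptions):

	/* Illustrative only: AES-CBC encrypt followed by HMAC-SHA1. */
	struct rte_crypto_sym_xform auth_xf = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.key = { .data = hmac_key, .length = 64 },
			.digest_length = 20,
		},
	};
	struct rte_crypto_sym_xform cipher_xf = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xf,	/* cipher first -> CIPHER_HASH */
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = aes_key, .length = 16 },
			.iv = { .offset = IV_OFFSET, .length = 16 },
		},
	};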
+
+int
+qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ uint8_t *key_data = auth_xform->key.data;
+ uint8_t key_length = auth_xform->key.length;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ if (qat_sym_validate_aes_key(auth_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+ QAT_LOG(ERR, "%s not supported on this device",
+ rte_crypto_auth_algorithm_strings
+ [auth_xform->algo]);
+ return -ENOTSUP;
+ }
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
+ auth_xform->algo);
+ return -ENOTSUP;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ return -EINVAL;
+ }
+
+ session->auth_iv.offset = auth_xform->iv.offset;
+ session->auth_iv.length = auth_xform->iv.length;
+
+ if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+ * The cipher descriptor content must be created first,
+ * then the authentication content
+ */
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ } else {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+ * The authentication descriptor content must be created
+ * first, then the cipher content
+ */
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+ }
+ /* Restore to authentication only */
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
+ } else {
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ }
+
+ session->digest_length = auth_xform->digest_length;
+ return 0;
+}
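Since GMAC is driven through the GCM cipher slice as shown above, the application still supplies a plain auth xform; a minimal sketch (gmac_key and IV_OFFSET are placeholder assumptions):

	/* Illustrative only: AES-GMAC authentication, internally run
	 * as a GCM cipher+hash chain by the code above. */
	struct rte_crypto_sym_xform gmac_xf = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_AES_GMAC,
			.key = { .data = gmac_key, .length = 16 },
			.iv = { .offset = IV_OFFSET, .length = 12 },
			.digest_length = 16,
		},
	};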
+
+int
+qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ enum rte_crypto_auth_operation crypto_operation;
+
+ /*
+ * Store AEAD IV parameters as cipher IV,
+ * to avoid unnecessary memory usage
+ */
+ session->cipher_iv.offset = xform->aead.iv.offset;
+ session->cipher_iv.length = xform->aead.iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ if (qat_sym_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ if (qat_sym_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
+ break;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
+ aead_xform->algo);
+ return -EINVAL;
+ }
+
+ if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
+ (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+ * The cipher descriptor content must be created first,
+ * then the authentication content
+ */
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ crypto_operation))
+ return -EINVAL;
+ } else {
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+ * The authentication descriptor content must be created
+ * first, then the cipher content
+ */
+
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ crypto_operation))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+ }
+
+ session->digest_length = aead_xform->digest_length;
+ return 0;
+}
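Because the AEAD IV is stored in the cipher IV fields, a single aead xform carries everything the session needs; a minimal sketch (gcm_key and IV_OFFSET are placeholder assumptions):

	/* Illustrative only: AES-128-GCM encrypt with 16 bytes of AAD. */
	struct rte_crypto_sym_xform aead_xf = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = gcm_key, .length = 16 },
			.iv = { .offset = IV_OFFSET, .length = 12 },
			.aad_length = 16,
			.digest_length = 16,
		},
	};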
+
+unsigned int
+qat_sym_session_get_private_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
+}
/* returns block size in bytes per cipher algo */
int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
@@ -30,7 +737,7 @@ int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
case ICP_QAT_HW_CIPHER_ALGO_AES256:
return ICP_QAT_HW_AES_BLK_SZ;
default:
- PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
+ QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
return -EFAULT;
};
return -EFAULT;
@@ -77,18 +784,18 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_NULL:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
return -EFAULT;
};
return -EFAULT;
@@ -114,7 +821,7 @@ static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
/* return maximum digest size in this case */
return ICP_QAT_HW_SHA512_STATE1_SZ;
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
return -EFAULT;
};
return -EFAULT;
@@ -142,7 +849,7 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
/* return maximum block size in this case */
return SHA512_CBLOCK;
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
return -EFAULT;
};
return -EFAULT;
@@ -226,7 +933,6 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
uint64_t *hash_state_out_be64;
int i;
- PMD_INIT_FUNC_TRACE();
digest_size = qat_hash_get_digest_size(hash_alg);
if (digest_size <= 0)
return -EFAULT;
@@ -275,7 +981,7 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
return -EFAULT;
break;
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
return -EFAULT;
}
@@ -285,7 +991,7 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
#define HMAC_OPAD_VALUE 0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
-static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
+static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
const uint8_t *auth_key,
uint16_t auth_keylen,
uint8_t *p_state_buf,
@@ -296,7 +1002,6 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
int i;
- PMD_INIT_FUNC_TRACE();
if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
static uint8_t qat_aes_xcbc_key_seed[
ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
@@ -316,7 +1021,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
in = rte_zmalloc("working mem for key",
ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
if (in == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory");
+ QAT_LOG(ERR, "Failed to alloc memory");
return -ENOMEM;
}
@@ -351,7 +1056,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
in = rte_zmalloc("working mem for key",
ICP_QAT_HW_GALOIS_H_SZ, 16);
if (in == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory");
+ QAT_LOG(ERR, "Failed to alloc memory");
return -ENOMEM;
}
@@ -376,7 +1081,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
memset(opad, 0, block_size);
if (auth_keylen > (unsigned int)block_size) {
- PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
+ QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
return -EFAULT;
}
rte_memcpy(ipad, auth_key, auth_keylen);
@@ -393,7 +1098,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
memset(ipad, 0, block_size);
memset(opad, 0, block_size);
- PMD_DRV_LOG(ERR, "ipad precompute failed");
+ QAT_LOG(ERR, "ipad precompute failed");
return -EFAULT;
}
@@ -405,7 +1110,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
memset(ipad, 0, block_size);
memset(opad, 0, block_size);
- PMD_DRV_LOG(ERR, "opad precompute failed");
+ QAT_LOG(ERR, "opad precompute failed");
return -EFAULT;
}
@@ -415,10 +1120,10 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
return 0;
}
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- enum qat_crypto_proto_flag proto_flags)
+static void
+qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_sym_proto_flag proto_flags)
{
- PMD_INIT_FUNC_TRACE();
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
@@ -464,11 +1169,11 @@ void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
* and set its protocol flag in both cipher and auth part of content
* descriptor building function
*/
-static enum qat_crypto_proto_flag
+static enum qat_sym_proto_flag
qat_get_crypto_proto_flag(uint16_t flags)
{
int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
switch (proto) {
@@ -483,7 +1188,7 @@ qat_get_crypto_proto_flag(uint16_t flags)
return qat_proto_flag;
}
-int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
+int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
uint8_t *cipherkey,
uint32_t cipherkeylen)
{
@@ -495,13 +1200,12 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
enum icp_qat_hw_cipher_convert key_convert;
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
uint32_t total_key_size;
uint16_t cipher_offset, cd_size;
uint32_t wordIndex = 0;
uint32_t *temp_key = NULL;
- PMD_INIT_FUNC_TRACE();
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
@@ -526,7 +1230,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
ICP_QAT_FW_SLICE_DRAM_WR);
cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
+ QAT_LOG(ERR, "Invalid param, must be a cipher command.");
return -EFAULT;
}
@@ -587,7 +1291,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
header->service_cmd_id = cdesc->qat_cmd;
- qat_alg_init_common_hdr(header, qat_proto_flag);
+ qat_sym_session_init_common_hdr(header, qat_proto_flag);
cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
cipher->cipher_config.val =
@@ -618,11 +1322,19 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
if (total_key_size > cipherkeylen) {
uint32_t padding_size = total_key_size-cipherkeylen;
if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
- && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
/* K3 not provided so use K1 = K3*/
memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
- else
+ } else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
+ /* K2 and K3 not provided so use K1 = K2 = K3 */
+ memcpy(cdesc->cd_cur_ptr, cipherkey,
+ cipherkeylen);
+ memcpy(cdesc->cd_cur_ptr+cipherkeylen,
+ cipherkey, cipherkeylen);
+ } else
memset(cdesc->cd_cur_ptr, 0, padding_size);
+
cdesc->cd_cur_ptr += padding_size;
}
cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
@@ -631,7 +1343,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
return 0;
}
-int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
uint8_t *authkey,
uint32_t authkeylen,
uint32_t aad_length,
@@ -655,11 +1367,9 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint32_t *aad_len = NULL;
uint32_t wordIndex = 0;
uint32_t *pTempKey;
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
- PMD_INIT_FUNC_TRACE();
-
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_AUTH);
@@ -677,7 +1387,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
ICP_QAT_FW_SLICE_DRAM_WR);
cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
+ QAT_LOG(ERR, "Invalid param, must be a hash command.");
return -EFAULT;
}
@@ -720,51 +1430,51 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
*/
switch (cdesc->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA224:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA384:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
&state2_size)) {
- PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
+ QAT_LOG(ERR, "(XCBC)precompute failed");
return -EFAULT;
}
break;
@@ -772,10 +1482,10 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
- if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
+ if (qat_sym_do_precomputes(cdesc->qat_hash_alg,
authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
&state2_size)) {
- PMD_DRV_LOG(ERR, "(GCM)precompute failed");
+ QAT_LOG(ERR, "(GCM)precompute failed");
return -EFAULT;
}
/*
@@ -832,10 +1542,10 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
break;
case ICP_QAT_HW_AUTH_ALGO_MD5:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
authkey, authkeylen, cdesc->cd_cur_ptr,
&state1_size)) {
- PMD_DRV_LOG(ERR, "(MD5)precompute failed");
+ QAT_LOG(ERR, "(MD5)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
@@ -854,14 +1564,13 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
if (aad_length > 0) {
aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
- ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
auth_param->u2.aad_sz =
- RTE_ALIGN_CEIL(aad_length,
- ICP_QAT_HW_CCM_AAD_ALIGNMENT);
+ RTE_ALIGN_CEIL(aad_length,
+ ICP_QAT_HW_CCM_AAD_ALIGNMENT);
} else {
auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
}
-
cdesc->aad_len = aad_length;
hash->auth_counter.counter = 0;
@@ -891,12 +1600,12 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
+ QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
return -EFAULT;
}
/* Request template setup */
- qat_alg_init_common_hdr(header, qat_proto_flag);
+ qat_sym_session_init_common_hdr(header, qat_proto_flag);
header->service_cmd_id = cdesc->qat_cmd;
/* Auth CD config setup */
@@ -922,7 +1631,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
return 0;
}
-int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_AES_128_KEY_SZ:
@@ -940,7 +1649,7 @@ int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_aes_docsisbpi_key(int key_len,
+int qat_sym_validate_aes_docsisbpi_key(int key_len,
enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
@@ -953,7 +1662,7 @@ int qat_alg_validate_aes_docsisbpi_key(int key_len,
return 0;
}
-int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
@@ -965,7 +1674,7 @@ int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_KASUMI_KEY_SZ:
@@ -977,7 +1686,7 @@ int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_DES_KEY_SZ:
@@ -989,11 +1698,12 @@ int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case QAT_3DES_KEY_SZ_OPT1:
case QAT_3DES_KEY_SZ_OPT2:
+ case QAT_3DES_KEY_SZ_OPT3:
*alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
break;
default:
@@ -1002,7 +1712,7 @@ int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
new file mode 100644
index 00000000..e8f51e5b
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _QAT_SYM_SESSION_H_
+#define _QAT_SYM_SESSION_H_
+
+#include <rte_crypto.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_common.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+/*
+ * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
+ * Integrity Key (IK)
+ */
+#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
+
+#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
+
+/* 3DES key sizes */
+#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
+#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
+#define QAT_3DES_KEY_SZ_OPT3 8 /* K1=K2=K3 */
+
+
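The three layouts map onto the hardware's fixed 24-byte K1|K2|K3 key; an illustrative expansion helper mirroring the padding logic in qat_sym_session_aead_create_cd_cipher() (the helper itself is hypothetical, not part of the driver):

	#include <string.h>

	/* Illustrative only: expand a 3DES key to the 24-byte
	 * K1|K2|K3 layout the hardware expects. */
	static void
	expand_3des_key(uint8_t out[24], const uint8_t *key, uint32_t keylen)
	{
		memcpy(out, key, keylen);	/* K1 (and K2/K3 if given) */
		if (keylen == QAT_3DES_KEY_SZ_OPT2) {	/* 16B: K3 = K1 */
			memcpy(out + 16, key, 8);
		} else if (keylen == QAT_3DES_KEY_SZ_OPT3) { /* 8B: K1=K2=K3 */
			memcpy(out + 8, key, 8);
			memcpy(out + 16, key, 8);
		}
	}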
+#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
+enum qat_sym_proto_flag {
+ QAT_CRYPTO_PROTO_FLAG_NONE = 0,
+ QAT_CRYPTO_PROTO_FLAG_CCM = 1,
+ QAT_CRYPTO_PROTO_FLAG_GCM = 2,
+ QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
+ QAT_CRYPTO_PROTO_FLAG_ZUC = 4
+};
+
+/* Common content descriptor */
+struct qat_sym_cd {
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ struct icp_qat_hw_auth_algo_blk hash;
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_session {
+ enum icp_qat_fw_la_cmd_id qat_cmd;
+ enum icp_qat_hw_cipher_algo qat_cipher_alg;
+ enum icp_qat_hw_cipher_dir qat_dir;
+ enum icp_qat_hw_cipher_mode qat_mode;
+ enum icp_qat_hw_auth_algo qat_hash_alg;
+ enum icp_qat_hw_auth_op auth_op;
+ void *bpi_ctx;
+ struct qat_sym_cd cd;
+ uint8_t *cd_cur_ptr;
+ phys_addr_t cd_paddr;
+ struct icp_qat_fw_la_bulk_req fw_req;
+ uint8_t aad_len;
+ struct qat_crypto_instance *inst;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } cipher_iv;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } auth_iv;
+ uint16_t digest_length;
+ rte_spinlock_t lock; /* protects this struct */
+ enum qat_device_gen min_qat_dev_gen;
+};
+
+int
+qat_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool);
+
+int
+qat_sym_session_set_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
+
+int
+qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cd,
+ uint8_t *enckey,
+ uint32_t enckeylen);
+
+int
+qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t aad_length,
+ uint32_t digestsize,
+ unsigned int operation);
+
+void
+qat_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *session);
+
+unsigned int
+qat_sym_session_get_private_size(struct rte_cryptodev *dev);
+
+int
+qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
+int
+qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+
+#endif /* _QAT_SYM_SESSION_H_ */
diff --git a/drivers/crypto/qat/rte_pmd_qat_version.map b/drivers/crypto/qat/rte_pmd_qat_version.map
deleted file mode 100644
index bbaf1c85..00000000
--- a/drivers/crypto/qat/rte_pmd_qat_version.map
+++ /dev/null
@@ -1,3 +0,0 @@
-DPDK_2.2 {
- local: *;
-};
\ No newline at end of file
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
deleted file mode 100644
index c8da07af..00000000
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <rte_cryptodev_pmd.h>
-
-#include "qat_crypto.h"
-#include "qat_logs.h"
-
-uint8_t cryptodev_qat_driver_id;
-
-static const struct rte_cryptodev_capabilities qat_gen1_capabilities[] = {
- QAT_BASE_GEN1_SYM_CAPABILITIES,
- RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-static const struct rte_cryptodev_capabilities qat_gen2_capabilities[] = {
- QAT_BASE_GEN1_SYM_CAPABILITIES,
- QAT_EXTRA_GEN2_SYM_CAPABILITIES,
- RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
- /* Device related operations */
- .dev_configure = qat_dev_config,
- .dev_start = qat_dev_start,
- .dev_stop = qat_dev_stop,
- .dev_close = qat_dev_close,
- .dev_infos_get = qat_dev_info_get,
-
- .stats_get = qat_crypto_sym_stats_get,
- .stats_reset = qat_crypto_sym_stats_reset,
- .queue_pair_setup = qat_crypto_sym_qp_setup,
- .queue_pair_release = qat_crypto_sym_qp_release,
- .queue_pair_start = NULL,
- .queue_pair_stop = NULL,
- .queue_pair_count = NULL,
-
- /* Crypto related operations */
- .session_get_size = qat_crypto_sym_get_session_private_size,
- .session_configure = qat_crypto_sym_configure_session,
- .session_clear = qat_crypto_sym_clear_session
-};
-
-/*
- * The set of PCI devices this driver supports
- */
-
-static const struct rte_pci_id pci_id_qat_map[] = {
- {
- RTE_PCI_DEVICE(0x8086, 0x0443),
- },
- {
- RTE_PCI_DEVICE(0x8086, 0x37c9),
- },
- {
- RTE_PCI_DEVICE(0x8086, 0x19e3),
- },
- {
- RTE_PCI_DEVICE(0x8086, 0x6f55),
- },
- {.device_id = 0},
-};
-
-static int
-crypto_qat_create(const char *name, struct rte_pci_device *pci_dev,
- struct rte_cryptodev_pmd_init_params *init_params)
-{
- struct rte_cryptodev *cryptodev;
- struct qat_pmd_private *internals;
-
- PMD_INIT_FUNC_TRACE();
-
- cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
- init_params);
- if (cryptodev == NULL)
- return -ENODEV;
-
- cryptodev->driver_id = cryptodev_qat_driver_id;
- cryptodev->dev_ops = &crypto_qat_ops;
-
- cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
- cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
-
- cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
- RTE_CRYPTODEV_FF_HW_ACCELERATED |
- RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
-
- internals = cryptodev->data->dev_private;
- internals->max_nb_sessions = init_params->max_nb_sessions;
- switch (pci_dev->id.device_id) {
- case 0x0443:
- internals->qat_dev_gen = QAT_GEN1;
- internals->qat_dev_capabilities = qat_gen1_capabilities;
- break;
- case 0x37c9:
- case 0x19e3:
- case 0x6f55:
- internals->qat_dev_gen = QAT_GEN2;
- internals->qat_dev_capabilities = qat_gen2_capabilities;
- break;
- default:
- PMD_DRV_LOG(ERR,
- "Invalid dev_id, can't determine capabilities");
- break;
- }
-
- /*
- * For secondary processes, we don't initialise any further as primary
- * has already done this work. Only check we don't need a different
- * RX function
- */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_DRV_LOG(DEBUG, "Device already initialised by primary process");
- return 0;
- }
-
- return 0;
-}
-
-static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
-{
- struct rte_cryptodev_pmd_init_params init_params = {
- .name = "",
- .socket_id = pci_dev->device.numa_node,
- .private_data_size = sizeof(struct qat_pmd_private),
- .max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS
- };
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-
- PMD_DRV_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
- pci_dev->addr.bus,
- pci_dev->addr.devid,
- pci_dev->addr.function);
-
- rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
-
- return crypto_qat_create(name, pci_dev, &init_params);
-}
-
-static int crypto_qat_pci_remove(struct rte_pci_device *pci_dev)
-{
- struct rte_cryptodev *cryptodev;
- char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-
- if (pci_dev == NULL)
- return -EINVAL;
-
- rte_pci_device_name(&pci_dev->addr, cryptodev_name,
- sizeof(cryptodev_name));
-
- cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
- if (cryptodev == NULL)
- return -ENODEV;
-
- /* free crypto device */
- return rte_cryptodev_pmd_destroy(cryptodev);
-}
-
-static struct rte_pci_driver rte_qat_pmd = {
- .id_table = pci_id_qat_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = crypto_qat_pci_probe,
- .remove = crypto_qat_pci_remove
-};
-
-static struct cryptodev_driver qat_crypto_drv;
-
-RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd);
-RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, rte_qat_pmd.driver,
- cryptodev_qat_driver_id);
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index ed574cc1..6e4919c4 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -9,6 +9,8 @@
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
+int scheduler_logtype_driver;
+
/** update the scheduler pmd's capability with attaching device's
* capability.
* For each device to be attached, the scheduler's capability should be
@@ -168,30 +170,30 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
uint32_t i;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
sched_ctx = dev->data->dev_private;
if (sched_ctx->nb_slaves >=
RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
- CS_LOG_ERR("Too many slaves attached");
+ CR_SCHED_LOG(ERR, "Too many slaves attached");
return -ENOMEM;
}
for (i = 0; i < sched_ctx->nb_slaves; i++)
if (sched_ctx->slaves[i].dev_id == slave_id) {
- CS_LOG_ERR("Slave already added");
+ CR_SCHED_LOG(ERR, "Slave already added");
return -ENOTSUP;
}
@@ -208,7 +210,7 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
slave->driver_id = 0;
sched_ctx->nb_slaves--;
- CS_LOG_ERR("capabilities update failed");
+ CR_SCHED_LOG(ERR, "capabilities update failed");
return -ENOTSUP;
}
@@ -227,17 +229,17 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
uint32_t i, slave_pos;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -247,12 +249,12 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
break;
if (slave_pos == sched_ctx->nb_slaves) {
- CS_LOG_ERR("Cannot find slave");
+ CR_SCHED_LOG(ERR, "Cannot find slave");
return -ENOTSUP;
}
if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
- CS_LOG_ERR("Failed to detach slave");
+ CR_SCHED_LOG(ERR, "Failed to detach slave");
return -ENOTSUP;
}
@@ -265,7 +267,7 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
sched_ctx->nb_slaves--;
if (update_scheduler_capability(sched_ctx) < 0) {
- CS_LOG_ERR("capabilities update failed");
+ CR_SCHED_LOG(ERR, "capabilities update failed");
return -ENOTSUP;
}
@@ -284,17 +286,17 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -307,33 +309,33 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
case CDEV_SCHED_MODE_ROUNDROBIN:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
roundrobin_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
pkt_size_based_distr_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_FAILOVER:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
failover_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_MULTICORE:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
multicore_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
default:
- CS_LOG_ERR("Not yet supported");
+ CR_SCHED_LOG(ERR, "Not yet supported");
return -ENOTSUP;
}
@@ -347,12 +349,12 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -369,17 +371,17 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -397,12 +399,12 @@ rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -419,25 +421,25 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
sched_ctx = dev->data->dev_private;
if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
- CS_LOG_ERR("Invalid name %s, should be less than "
- "%u bytes.\n", scheduler->name,
+ CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
+ "%u bytes.", scheduler->name,
RTE_CRYPTODEV_NAME_MAX_LEN);
return -EINVAL;
}
@@ -446,8 +448,8 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
if (strlen(scheduler->description) >
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
- CS_LOG_ERR("Invalid description %s, should be less than "
- "%u bytes.\n", scheduler->description,
+ CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
+ "%u bytes.", scheduler->description,
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
return -EINVAL;
}
@@ -473,7 +475,7 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
int ret = (*sched_ctx->ops.create_private_ctx)(dev);
if (ret < 0) {
- CS_LOG_ERR("Unable to create scheduler private "
+ CR_SCHED_LOG(ERR, "Unable to create scheduler private "
"context");
return ret;
}
@@ -492,12 +494,12 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
uint32_t nb_slaves = 0;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -525,17 +527,17 @@ rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
option_type >= CDEV_SCHED_OPTION_COUNT) {
- CS_LOG_ERR("Invalid option parameter");
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
return -EINVAL;
}
if (!option) {
- CS_LOG_ERR("Invalid option parameter");
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
return -EINVAL;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -555,17 +557,17 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (!option) {
- CS_LOG_ERR("Invalid option parameter");
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
return -EINVAL;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -575,3 +577,8 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
return (*sched_ctx->ops.option_get)(dev, option_type, option);
}
+
+RTE_INIT(scheduler_init_log)
+{
+ scheduler_logtype_driver = rte_log_register("pmd.crypto.scheduler");
+}
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 1c164da7..3faea409 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -76,6 +76,7 @@ enum rte_cryptodev_schedule_option_type {
/**
* Threshold option structure
*/
+#define RTE_CRYPTODEV_SCHEDULER_PARAM_THRES "threshold"
struct rte_cryptodev_scheduler_threshold_option {
uint32_t threshold; /**< Threshold for packet-size mode */
};
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 005b1638..ddfb5b81 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -139,7 +139,7 @@ scheduler_start(struct rte_cryptodev *dev)
uint16_t i;
if (sched_ctx->nb_slaves < 2) {
- CS_LOG_ERR("Number of slaves shall no less than 2");
+ CR_SCHED_LOG(ERR, "Number of slaves shall no less than 2");
return -ENOMEM;
}
@@ -182,7 +182,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
rte_socket_id());
if (!fo_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
return -ENOMEM;
}
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index 91fb0668..d410e69d 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -178,7 +178,8 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
}
}
if (worker_idx == -1) {
- CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id);
+ CR_SCHED_LOG(ERR, "worker on core %u:cannot find worker index!",
+ core_id);
return -1;
}
@@ -313,7 +314,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
rte_socket_id());
if (!mc_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
return -ENOMEM;
}
@@ -339,7 +340,7 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
rte_socket_id());
if (!mc_ctx) {
- CS_LOG_ERR("failed allocate memory");
+ CR_SCHED_LOG(ERR, "failed allocate memory");
return -ENOMEM;
}
@@ -356,7 +357,7 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
rte_socket_id(),
RING_F_SC_DEQ | RING_F_SP_ENQ);
if (!mc_ctx->sched_enq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u",
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
i);
goto exit;
}
@@ -370,7 +371,7 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
rte_socket_id(),
RING_F_SC_DEQ | RING_F_SP_ENQ);
if (!mc_ctx->sched_deq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u",
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
i);
goto exit;
}
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index d09e849a..74129b66 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -258,7 +258,7 @@ scheduler_start(struct rte_cryptodev *dev)
/* for packet size based scheduler, nb_slaves have to >= 2 */
if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
- CS_LOG_ERR("not enough slaves to start");
+ CR_SCHED_LOG(ERR, "not enough slaves to start");
return -1;
}
@@ -302,7 +302,7 @@ scheduler_stop(struct rte_cryptodev *dev)
if (ps_qp_ctx->primary_slave.nb_inflight_cops +
ps_qp_ctx->secondary_slave.nb_inflight_cops) {
- CS_LOG_ERR("Some crypto ops left in slave queue");
+ CR_SCHED_LOG(ERR, "Some crypto ops left in slave queue");
return -1;
}
}
@@ -319,7 +319,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
rte_socket_id());
if (!ps_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
return -ENOMEM;
}
@@ -342,7 +342,7 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
rte_socket_id());
if (!psd_ctx) {
- CS_LOG_ERR("failed allocate memory");
+ CR_SCHED_LOG(ERR, "failed allocate memory");
return -ENOMEM;
}
@@ -362,14 +362,14 @@ scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
if ((enum rte_cryptodev_schedule_option_type)option_type !=
CDEV_SCHED_OPTION_THRESHOLD) {
- CS_LOG_ERR("Option not supported");
+ CR_SCHED_LOG(ERR, "Option not supported");
return -EINVAL;
}
threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
option)->threshold;
if (!rte_is_power_of_2(threshold)) {
- CS_LOG_ERR("Threshold is not power of 2");
+ CR_SCHED_LOG(ERR, "Threshold is not power of 2");
return -EINVAL;
}
@@ -388,7 +388,7 @@ scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
if ((enum rte_cryptodev_schedule_option_type)option_type !=
CDEV_SCHED_OPTION_THRESHOLD) {
- CS_LOG_ERR("Option not supported");
+ CR_SCHED_LOG(ERR, "Option not supported");
return -EINVAL;
}
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 25d6409f..a9221a94 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -9,6 +9,7 @@
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_reorder.h>
+#include <rte_string_fns.h>
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
@@ -19,6 +20,7 @@ struct scheduler_init_params {
struct rte_cryptodev_pmd_init_params def_p;
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
+ char mode_param_str[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
uint32_t enable_ordering;
uint16_t wc_pool[RTE_MAX_LCORE];
uint16_t nb_wc;
@@ -29,9 +31,9 @@ struct scheduler_init_params {
#define RTE_CRYPTODEV_VDEV_NAME ("name")
#define RTE_CRYPTODEV_VDEV_SLAVE ("slave")
#define RTE_CRYPTODEV_VDEV_MODE ("mode")
+#define RTE_CRYPTODEV_VDEV_MODE_PARAM ("mode_param")
#define RTE_CRYPTODEV_VDEV_ORDERING ("ordering")
#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
-#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
#define RTE_CRYPTODEV_VDEV_COREMASK ("coremask")
#define RTE_CRYPTODEV_VDEV_CORELIST ("corelist")
@@ -40,9 +42,9 @@ const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_NAME,
RTE_CRYPTODEV_VDEV_SLAVE,
RTE_CRYPTODEV_VDEV_MODE,
+ RTE_CRYPTODEV_VDEV_MODE_PARAM,
RTE_CRYPTODEV_VDEV_ORDERING,
RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
RTE_CRYPTODEV_VDEV_SOCKET_ID,
RTE_CRYPTODEV_VDEV_COREMASK,
RTE_CRYPTODEV_VDEV_CORELIST
@@ -69,6 +71,8 @@ const struct scheduler_parse_map scheduler_ordering_map[] = {
{"disable", 0}
};
+#define CDEV_SCHED_MODE_PARAM_SEP_CHAR ':'
+
static int
cryptodev_scheduler_create(const char *name,
struct rte_vdev_device *vdev,
@@ -82,7 +86,7 @@ cryptodev_scheduler_create(const char *name,
dev = rte_cryptodev_pmd_create(name, &vdev->device,
&init_params->def_p);
if (dev == NULL) {
- CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
+ CR_SCHED_LOG(ERR, "driver %s: failed to create cryptodev vdev",
name);
return -EFAULT;
}
@@ -101,13 +105,22 @@ cryptodev_scheduler_create(const char *name,
for (i = 0; i < sched_ctx->nb_wc; i++) {
sched_ctx->wc_pool[i] = init_params->wc_pool[i];
- RTE_LOG(INFO, PMD, " Worker core[%u]=%u added\n",
+ CR_SCHED_LOG(INFO, " Worker core[%u]=%u added",
i, sched_ctx->wc_pool[i]);
}
}
if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
init_params->mode < CDEV_SCHED_MODE_COUNT) {
+ union {
+ struct rte_cryptodev_scheduler_threshold_option
+ threshold_option;
+ } option;
+ enum rte_cryptodev_schedule_option_type option_type;
+ char param_name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0};
+ char param_val[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0};
+ char *s, *end;
+
ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
init_params->mode);
if (ret < 0) {
@@ -119,10 +132,52 @@ cryptodev_scheduler_create(const char *name,
if (scheduler_mode_map[i].val != sched_ctx->mode)
continue;
- RTE_LOG(INFO, PMD, " Scheduling mode = %s\n",
+ CR_SCHED_LOG(INFO, " Scheduling mode = %s",
scheduler_mode_map[i].name);
break;
}
+
+ if (strlen(init_params->mode_param_str) > 0) {
+ s = strchr(init_params->mode_param_str,
+ CDEV_SCHED_MODE_PARAM_SEP_CHAR);
+ if (s == NULL) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ strlcpy(param_name, init_params->mode_param_str,
+ s - init_params->mode_param_str + 1);
+ s++;
+ strlcpy(param_val, s,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ switch (init_params->mode) {
+ case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
+ if (strcmp(param_name,
+ RTE_CRYPTODEV_SCHEDULER_PARAM_THRES)
+ != 0) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+ option_type = CDEV_SCHED_OPTION_THRESHOLD;
+
+ option.threshold_option.threshold =
+ strtoul(param_val, &end, 0);
+ break;
+ default:
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ if (sched_ctx->ops.option_set(dev, option_type,
+ (void *)&option) < 0) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ RTE_LOG(INFO, PMD, " Sched mode param (%s = %s)\n",
+ param_name, param_val);
+ }
}
sched_ctx->reordering_enabled = init_params->enable_ordering;
@@ -132,7 +187,7 @@ cryptodev_scheduler_create(const char *name,
sched_ctx->reordering_enabled)
continue;
- RTE_LOG(INFO, PMD, " Packet ordering = %s\n",
+ CR_SCHED_LOG(INFO, " Packet ordering = %s",
scheduler_ordering_map[i].name);
break;
@@ -147,7 +202,7 @@ cryptodev_scheduler_create(const char *name,
if (!sched_ctx->init_slave_names[
sched_ctx->nb_init_slaves]) {
- CS_LOG_ERR("driver %s: Insufficient memory",
+ CR_SCHED_LOG(ERR, "driver %s: Insufficient memory",
name);
return -ENOMEM;
}
@@ -169,8 +224,8 @@ cryptodev_scheduler_create(const char *name,
0, SOCKET_ID_ANY);
if (!sched_ctx->capabilities) {
- RTE_LOG(ERR, PMD, "Not enough memory for capability "
- "information\n");
+ CR_SCHED_LOG(ERR, "Not enough memory for capability "
+ "information");
return -ENOMEM;
}
@@ -214,7 +269,7 @@ parse_integer_arg(const char *key __rte_unused,
*i = atoi(value);
if (*i < 0) {
- CS_LOG_ERR("Argument has to be positive.\n");
+ CR_SCHED_LOG(ERR, "Argument has to be positive.");
return -EINVAL;
}
@@ -287,8 +342,8 @@ parse_corelist_arg(const char *key __rte_unused,
unsigned int core = strtoul(token, &rval, 10);
if (core >= RTE_MAX_LCORE) {
- CS_LOG_ERR("Invalid worker core %u, should be smaller "
- "than %u.\n", core, RTE_MAX_LCORE);
+ CR_SCHED_LOG(ERR, "Invalid worker core %u, should be smaller "
+ "than %u.", core, RTE_MAX_LCORE);
}
params->wc_pool[params->nb_wc++] = (uint16_t)core;
token = (const char *)rval;
@@ -308,8 +363,8 @@ parse_name_arg(const char *key __rte_unused,
struct rte_cryptodev_pmd_init_params *params = extra_args;
if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
- CS_LOG_ERR("Invalid name %s, should be less than "
- "%u bytes.\n", value,
+ CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
+ "%u bytes.", value,
RTE_CRYPTODEV_NAME_MAX_LEN - 1);
return -EINVAL;
}
@@ -327,7 +382,7 @@ parse_slave_arg(const char *key __rte_unused,
struct scheduler_init_params *param = extra_args;
if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
- CS_LOG_ERR("Too many slaves.\n");
+ CR_SCHED_LOG(ERR, "Too many slaves.");
return -ENOMEM;
}
@@ -348,12 +403,13 @@ parse_mode_arg(const char *key __rte_unused,
if (strcmp(value, scheduler_mode_map[i].name) == 0) {
param->mode = (enum rte_cryptodev_scheduler_mode)
scheduler_mode_map[i].val;
+
break;
}
}
if (i == RTE_DIM(scheduler_mode_map)) {
- CS_LOG_ERR("Unrecognized input.\n");
+ CR_SCHED_LOG(ERR, "Unrecognized input.");
return -EINVAL;
}
@@ -361,6 +417,18 @@ parse_mode_arg(const char *key __rte_unused,
}
static int
+parse_mode_param_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+
+ strlcpy(param->mode_param_str, value,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static int
parse_ordering_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
@@ -376,7 +444,7 @@ parse_ordering_arg(const char *key __rte_unused,
}
if (i == RTE_DIM(scheduler_ordering_map)) {
- CS_LOG_ERR("Unrecognized input.\n");
+ CR_SCHED_LOG(ERR, "Unrecognized input.");
return -EINVAL;
}
@@ -406,13 +474,6 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
if (ret < 0)
goto free_kvlist;
- ret = rte_kvargs_process(kvlist,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- &parse_integer_arg,
- &params->def_p.max_nb_sessions);
- if (ret < 0)
- goto free_kvlist;
-
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
&parse_integer_arg,
&params->def_p.socket_id);
@@ -447,6 +508,11 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
if (ret < 0)
goto free_kvlist;
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE_PARAM,
+ &parse_mode_param_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_ORDERING,
&parse_ordering_arg, params);
if (ret < 0)
@@ -466,8 +532,7 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct scheduler_ctx),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
},
.nb_slaves = 0,
.mode = CDEV_SCHED_MODE_NOT_SET,
@@ -500,7 +565,6 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
cryptodev_scheduler_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int> "
"slave=<name>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
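
The scheduler's new mode_param devarg (RTE_CRYPTODEV_VDEV_MODE_PARAM) is only honoured by the packet-size-distribution mode, where it carries the size threshold parsed with strtoul above. A minimal standalone model of that parse path, using plain libc in place of the rte_kvargs plumbing — the enum and function names here are invented for illustration, not the PMD's:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-ins for the scheduler's mode and option types. */
    enum sched_mode { MODE_NOT_SET, MODE_PKT_SIZE_DISTR };

    static int
    set_threshold_option(enum sched_mode mode, const char *name,
    		const char *val, uint32_t *threshold)
    {
    	char *end;

    	if (mode != MODE_PKT_SIZE_DISTR || strcmp(name, "threshold") != 0)
    		return -EINVAL;	/* only this mode takes a parameter */

    	errno = 0;
    	*threshold = strtoul(val, &end, 0);
    	if (errno != 0 || end == val || *end != '\0')
    		return -EINVAL;	/* reject trailing garbage */
    	return 0;
    }

    int
    main(void)
    {
    	uint32_t thres;

    	if (set_threshold_option(MODE_PKT_SIZE_DISTR, "threshold", "128",
    			&thres) == 0)
    		printf("threshold = %u\n", thres);
    	return 0;
    }
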
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 147dc51e..778071ca 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -27,7 +27,7 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev)
int status;
if (!slave_dev) {
- CS_LOG_ERR("Failed to locate slave dev %s",
+ CR_SCHED_LOG(ERR, "Failed to locate slave dev %s",
dev_name);
return -EINVAL;
}
@@ -36,12 +36,12 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev)
scheduler_id, slave_dev->data->dev_id);
if (status < 0) {
- CS_LOG_ERR("Failed to attach slave cryptodev %u",
+ CR_SCHED_LOG(ERR, "Failed to attach slave cryptodev %u",
slave_dev->data->dev_id);
return status;
}
- CS_LOG_INFO("Scheduler %s attached slave %s\n",
+ CR_SCHED_LOG(INFO, "Scheduler %s attached slave %s",
dev->data->name,
sched_ctx->init_slave_names[i]);
@@ -102,7 +102,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
dev->data->dev_id, qp_id) < 0) {
- CS_LOG_ERR("failed to create unique reorder buffer "
+ CR_SCHED_LOG(ERR, "failed to create unique reorder buffer"
"name");
return -ENOMEM;
}
@@ -111,7 +111,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
buff_size, rte_socket_id(),
RING_F_SP_ENQ | RING_F_SC_DEQ);
if (!qp_ctx->order_ring) {
- CS_LOG_ERR("failed to create order ring");
+ CR_SCHED_LOG(ERR, "failed to create order ring");
return -ENOMEM;
}
} else {
@@ -145,18 +145,18 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
ret = update_order_ring(dev, i);
if (ret < 0) {
- CS_LOG_ERR("Failed to update reorder buffer");
+ CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
return ret;
}
}
if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
- CS_LOG_ERR("Scheduler mode is not set");
+ CR_SCHED_LOG(ERR, "Scheduler mode is not set");
return -1;
}
if (!sched_ctx->nb_slaves) {
- CS_LOG_ERR("No slave in the scheduler");
+ CR_SCHED_LOG(ERR, "No slave in the scheduler");
return -1;
}
@@ -166,7 +166,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
- CS_LOG_ERR("Failed to attach slave");
+ CR_SCHED_LOG(ERR, "Failed to attach slave");
return -ENOTSUP;
}
}
@@ -174,7 +174,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);
if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
- CS_LOG_ERR("Scheduler start failed");
+ CR_SCHED_LOG(ERR, "Scheduler start failed");
return -1;
}
@@ -186,7 +186,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
if (ret < 0) {
- CS_LOG_ERR("Failed to start slave dev %u",
+ CR_SCHED_LOG(ERR, "Failed to start slave dev %u",
slave_dev_id);
return ret;
}
@@ -321,8 +321,9 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
struct rte_cryptodev_info *dev_info)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
- uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
- UINT32_MAX : RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
+ uint32_t max_nb_sess = 0;
+ uint16_t headroom_sz = 0;
+ uint16_t tailroom_sz = 0;
uint32_t i;
if (!dev_info)
@@ -338,17 +339,32 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
struct rte_cryptodev_info slave_info;
rte_cryptodev_info_get(slave_dev_id, &slave_info);
- max_nb_sessions = slave_info.sym.max_nb_sessions <
- max_nb_sessions ?
- slave_info.sym.max_nb_sessions :
- max_nb_sessions;
+ uint32_t dev_max_sess = slave_info.sym.max_nb_sessions;
+ if (dev_max_sess != 0) {
+ if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
+ max_nb_sess = slave_info.sym.max_nb_sessions;
+ }
+
+ /* Get the max headroom requirement among slave PMDs */
+ headroom_sz = slave_info.min_mbuf_headroom_req >
+ headroom_sz ?
+ slave_info.min_mbuf_headroom_req :
+ headroom_sz;
+
+ /* Get the max tailroom requirement among slave PMDs */
+ tailroom_sz = slave_info.min_mbuf_tailroom_req >
+ tailroom_sz ?
+ slave_info.min_mbuf_tailroom_req :
+ tailroom_sz;
}
dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = sched_ctx->capabilities;
dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = max_nb_sessions;
+ dev_info->min_mbuf_headroom_req = headroom_sz;
+ dev_info->min_mbuf_tailroom_req = tailroom_sz;
+ dev_info->sym.max_nb_sessions = max_nb_sess;
}
/** Release queue pair */
@@ -386,7 +402,7 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
"CRYTO_SCHE PMD %u QP %u",
dev->data->dev_id, qp_id) < 0) {
- CS_LOG_ERR("Failed to create unique queue pair name");
+ CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
return -EFAULT;
}
@@ -424,14 +440,14 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
*/
ret = scheduler_attach_init_slave(dev);
if (ret < 0) {
- CS_LOG_ERR("Failed to attach slave");
+ CR_SCHED_LOG(ERR, "Failed to attach slave");
scheduler_pmd_qp_release(dev, qp_id);
return ret;
}
if (*sched_ctx->ops.config_queue_pair) {
if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
- CS_LOG_ERR("Unable to configure queue pair");
+ CR_SCHED_LOG(ERR, "Unable to configure queue pair");
return -1;
}
}
@@ -439,22 +455,6 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return 0;
}
-/** Start queue pair */
-static int
-scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
@@ -463,7 +463,7 @@ scheduler_pmd_qp_count(struct rte_cryptodev *dev)
}
static uint32_t
-scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint8_t i = 0;
@@ -473,7 +473,7 @@ scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
- uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
+ uint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
if (max_priv_sess_size < priv_sess_size)
max_priv_sess_size = priv_sess_size;
@@ -483,7 +483,7 @@ scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
static int
-scheduler_pmd_session_configure(struct rte_cryptodev *dev,
+scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -498,7 +498,7 @@ scheduler_pmd_session_configure(struct rte_cryptodev *dev,
ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
xform, mempool);
if (ret < 0) {
- CS_LOG_ERR("unabled to config sym session");
+ CR_SCHED_LOG(ERR, "unable to config sym session");
return ret;
}
}
@@ -508,7 +508,7 @@ scheduler_pmd_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-scheduler_pmd_session_clear(struct rte_cryptodev *dev,
+scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
@@ -535,13 +535,11 @@ struct rte_cryptodev_ops scheduler_pmd_ops = {
.queue_pair_setup = scheduler_pmd_qp_setup,
.queue_pair_release = scheduler_pmd_qp_release,
- .queue_pair_start = scheduler_pmd_qp_start,
- .queue_pair_stop = scheduler_pmd_qp_stop,
.queue_pair_count = scheduler_pmd_qp_count,
- .session_get_size = scheduler_pmd_session_get_size,
- .session_configure = scheduler_pmd_session_configure,
- .session_clear = scheduler_pmd_session_clear,
+ .sym_session_get_size = scheduler_pmd_sym_session_get_size,
+ .sym_session_configure = scheduler_pmd_sym_session_configure,
+ .sym_session_clear = scheduler_pmd_sym_session_clear,
};
struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
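
scheduler_pmd_info_get() now folds three per-slave values into the advertised capabilities: the smallest non-zero session limit (0 meaning unlimited) and the largest mbuf headroom and tailroom requirements. A self-contained sketch of just that folding rule, with an invented slave_caps struct standing in for rte_cryptodev_info:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented per-slave summary; mirrors the three fields the PMD reads. */
    struct slave_caps {
    	uint32_t max_nb_sessions;	/* 0 means "no limit" */
    	uint16_t min_headroom;
    	uint16_t min_tailroom;
    };

    static void
    fold_caps(const struct slave_caps *s, unsigned int n,
    		uint32_t *max_sess, uint16_t *headroom, uint16_t *tailroom)
    {
    	unsigned int i;

    	*max_sess = 0;	/* stays 0 (unlimited) unless a slave caps it */
    	*headroom = 0;
    	*tailroom = 0;
    	for (i = 0; i < n; i++) {
    		if (s[i].max_nb_sessions != 0 &&
    				(*max_sess == 0 ||
    				 s[i].max_nb_sessions < *max_sess))
    			*max_sess = s[i].max_nb_sessions; /* tightest limit */
    		if (s[i].min_headroom > *headroom)
    			*headroom = s[i].min_headroom;	/* largest need wins */
    		if (s[i].min_tailroom > *tailroom)
    			*tailroom = s[i].min_tailroom;
    	}
    }

    int
    main(void)
    {
    	struct slave_caps caps[2] = { { 2048, 64, 0 }, { 0, 128, 8 } };
    	uint32_t sess;
    	uint16_t head, tail;

    	fold_caps(caps, 2, &sess, &head, &tail);
    	printf("sessions=%u headroom=%u tailroom=%u\n", sess, head, tail);
    	return 0;
    }
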
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index 12410b48..d5e602a2 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -12,25 +12,11 @@
#define PER_SLAVE_BUFF_SIZE (256)
-#define CS_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_DEBUG
-#define CS_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
- __func__, __LINE__, ## args)
-
-#define CS_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define CS_LOG_INFO(fmt, args...)
-#define CS_LOG_DBG(fmt, args...)
-#endif
+extern int scheduler_logtype_driver;
+
+#define CR_SCHED_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, ##args)
struct scheduler_slave {
uint8_t dev_id;
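
The CR_SCHED_LOG macro above replaces the compile-time CS_LOG_* family with a single macro keyed to a logtype registered at runtime, so verbosity becomes a runtime knob instead of a build option. A standalone model of the pattern, with a stub in place of EAL's rte_log() — names and level values here are illustrative:

    #include <stdarg.h>
    #include <stdio.h>

    /* Stand-ins for rte_log levels and registration. */
    enum { LOG_ERR = 4, LOG_INFO = 7 };
    static int scheduler_logtype;		/* would come from rte_log_register() */
    static int log_level = LOG_INFO;	/* runtime-adjustable threshold */

    static void
    stub_log(int level, int logtype, const char *fmt, ...)
    {
    	va_list ap;

    	(void)logtype;
    	if (level > log_level)
    		return;	/* suppressed at runtime, no rebuild needed */
    	va_start(ap, fmt);
    	vfprintf(stderr, fmt, ap);
    	va_end(ap);
    }

    /* Same construction as CR_SCHED_LOG: level token pasted, func/line prefixed. */
    #define SCHED_LOG(level, fmt, args...) \
    	stub_log(LOG_ ## level, scheduler_logtype, \
    		"%s() line %u: " fmt "\n", __func__, __LINE__, ## args)

    int
    main(void)
    {
    	SCHED_LOG(ERR, "worker core %u rejected", 99u);
    	return 0;
    }
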
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index c6e03e21..c7082a64 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -175,7 +175,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
rte_socket_id());
if (!rr_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
return -ENOMEM;
}
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 72751e35..a17536b7 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <rte_common.h>
@@ -79,7 +79,7 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
break;
case SNOW3G_OP_NOT_SUPPORTED:
default:
- SNOW3G_LOG_ERR("Unsupported operation chain order parameter");
+ SNOW3G_LOG(ERR, "Unsupported operation chain order parameter");
return -ENOTSUP;
}
@@ -89,7 +89,7 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
return -ENOTSUP;
if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
- SNOW3G_LOG_ERR("Wrong IV length");
+ SNOW3G_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
@@ -105,14 +105,14 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
return -ENOTSUP;
if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
- SNOW3G_LOG_ERR("Wrong digest length");
+ SNOW3G_LOG(ERR, "Wrong digest length");
return -EINVAL;
}
sess->auth_op = auth_xform->auth.op;
if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
- SNOW3G_LOG_ERR("Wrong IV length");
+ SNOW3G_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
sess->auth_iv_offset = auth_xform->auth.iv.offset;
@@ -137,7 +137,7 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(op->sym->session != NULL))
sess = (struct snow3g_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
} else {
@@ -159,8 +159,8 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -216,7 +216,7 @@ process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
if (op->sym->m_dst == NULL) {
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("bit-level in-place not supported\n");
+ SNOW3G_LOG(ERR, "bit-level in-place not supported\n");
return 0;
}
dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
@@ -246,7 +246,7 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("Offset");
+ SNOW3G_LOG(ERR, "Offset");
break;
}
@@ -295,7 +295,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
(ops[i]->sym->m_dst != NULL &&
!rte_pktmbuf_is_contiguous(
ops[i]->sym->m_dst))) {
- SNOW3G_LOG_ERR("PMD supports only contiguous mbufs, "
+ SNOW3G_LOG(ERR, "PMD supports only contiguous mbufs, "
"op (%p) provides noncontiguous mbuf as "
"source/destination buffer.\n", ops[i]);
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -537,7 +537,7 @@ cryptodev_snow3g_create(const char *name,
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- SNOW3G_LOG_ERR("failed to create cryptodev vdev");
+ SNOW3G_LOG(ERR, "failed to create cryptodev vdev");
goto init_error;
}
@@ -555,11 +555,10 @@ cryptodev_snow3g_create(const char *name,
internals = dev->data->dev_private;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
- SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed",
+ SNOW3G_LOG(ERR, "driver %s: cryptodev_snow3g_create failed",
init_params->name);
cryptodev_snow3g_remove(vdev);
@@ -573,8 +572,7 @@ cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct snow3g_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -617,7 +615,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
cryptodev_snow3g_pmd_drv.driver, cryptodev_driver_id);
+
+int snow3g_logtype_driver;
+
+RTE_INIT(snow3g_init_log)
+{
+ snow3g_logtype_driver = rte_log_register("pmd.crypto.snow3g");
+}
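
The get/set helpers renamed above (get_sym_session_private_data and friends) index one private-data slot per crypto driver inside a shared session object, which is how the same rte_cryptodev_sym_session can be initialised for several PMDs at once. A toy, array-backed model of that lookup — the table size and names are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define NB_DRIVERS 8	/* illustrative; the real table is sized at config time */

    /* Toy session: one opaque slot per registered crypto driver. */
    struct sym_session {
    	void *sess_private_data[NB_DRIVERS];
    };

    static inline void *
    get_priv(const struct sym_session *sess, uint8_t driver_id)
    {
    	return sess->sess_private_data[driver_id];
    }

    static inline void
    set_priv(struct sym_session *sess, uint8_t driver_id, void *priv)
    {
    	sess->sess_private_data[driver_id] = priv;
    }

    int
    main(void)
    {
    	struct sym_session s = { { 0 } };
    	int snow3g_priv = 42;

    	set_priv(&s, 3, &snow3g_priv);	/* driver id 3, e.g. this PMD */
    	printf("slot 3 -> %d\n", *(int *)get_priv(&s, 3));
    	return 0;
    }
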
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
index f60b4759..cfbc9522 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <string.h>
@@ -130,7 +130,8 @@ snow3g_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+		/* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = snow3g_pmd_capabilities;
}
@@ -172,13 +173,13 @@ snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- SNOW3G_LOG_INFO("Reusing existing ring %s"
+ SNOW3G_LOG(INFO, "Reusing existing ring %s"
" for processed packets",
qp->name);
return r;
}
- SNOW3G_LOG_ERR("Unable to reuse existing ring %s"
+ SNOW3G_LOG(ERR, "Unable to reuse existing ring %s"
" for processed packets",
qp->name);
return NULL;
@@ -230,22 +231,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-snow3g_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-snow3g_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
snow3g_pmd_qp_count(struct rte_cryptodev *dev)
@@ -255,14 +240,14 @@ snow3g_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the SNOW 3G session structure */
static unsigned
-snow3g_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+snow3g_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct snow3g_session);
}
/** Configure a SNOW 3G session from a crypto xform chain */
static int
-snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+snow3g_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -271,26 +256,26 @@ snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
int ret;
if (unlikely(sess == NULL)) {
- SNOW3G_LOG_ERR("invalid session struct");
+ SNOW3G_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
+ SNOW3G_LOG(ERR,
"Couldn't get object from session mempool");
return -ENOMEM;
}
ret = snow3g_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- SNOW3G_LOG_ERR("failed configure session parameters");
+ SNOW3G_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -298,17 +283,17 @@ snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-snow3g_pmd_session_clear(struct rte_cryptodev *dev,
+snow3g_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct snow3g_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -326,13 +311,11 @@ struct rte_cryptodev_ops snow3g_pmd_ops = {
.queue_pair_setup = snow3g_pmd_qp_setup,
.queue_pair_release = snow3g_pmd_qp_release,
- .queue_pair_start = snow3g_pmd_qp_start,
- .queue_pair_stop = snow3g_pmd_qp_stop,
.queue_pair_count = snow3g_pmd_qp_count,
- .session_get_size = snow3g_pmd_session_get_size,
- .session_configure = snow3g_pmd_session_configure,
- .session_clear = snow3g_pmd_session_clear
+ .sym_session_get_size = snow3g_pmd_sym_session_get_size,
+ .sym_session_configure = snow3g_pmd_sym_session_configure,
+ .sym_session_clear = snow3g_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;
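
snow3g_pmd_sym_session_clear() wipes the private data, detaches it from the session, and only then returns it to its mempool; the order matters because the object can be handed out again the moment it is back in the pool. A freestanding sketch of the same wipe-detach-put sequence, with a one-slot free list standing in for rte_mempool:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct priv { unsigned char key[16]; };	/* stand-in for snow3g_session */

    /* A one-slot "mempool": enough to show the put-after-wipe ordering. */
    static struct priv slab;
    static struct priv *free_slot = &slab;

    static struct priv *
    pool_get(void)
    {
    	struct priv *p = free_slot;

    	free_slot = NULL;
    	return p;
    }

    static void
    pool_put(struct priv *p)
    {
    	free_slot = p;
    }

    static void
    session_clear(struct priv **slot)
    {
    	struct priv *p = *slot;

    	if (p == NULL)
    		return;
    	memset(p, 0, sizeof(*p));	/* 1: no key material left behind */
    	*slot = NULL;			/* 2: session no longer points at it */
    	pool_put(p);			/* 3: only now is reuse safe */
    }

    int
    main(void)
    {
    	struct priv *sess_priv = pool_get();

    	memset(sess_priv->key, 0xAA, sizeof(sess_priv->key));
    	session_clear(&sess_priv);
    	printf("cleared, slot=%p\n", (void *)sess_priv);
    	return 0;
    }
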
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
index eea900e0..b7807b62 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#ifndef _RTE_SNOW3G_PMD_PRIVATE_H_
@@ -10,25 +10,13 @@
#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
/**< SNOW 3G PMD device name */
-#define SNOW3G_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_SNOW3G_DEBUG
-#define SNOW3G_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \
- __func__, __LINE__, ## args)
-
-#define SNOW3G_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define SNOW3G_LOG_INFO(fmt, args...)
-#define SNOW3G_LOG_DBG(fmt, args...)
-#endif
+/** SNOW 3G PMD logtype (driver). */
+extern int snow3g_logtype_driver;
+
+#define SNOW3G_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, snow3g_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
#define SNOW3G_DIGEST_LENGTH 4
@@ -36,8 +24,6 @@
struct snow3g_private {
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
};
/** SNOW 3G buffer queue pair */
diff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c
index df88953f..568b5a40 100644
--- a/drivers/crypto/virtio/virtio_cryptodev.c
+++ b/drivers/crypto/virtio/virtio_cryptodev.c
@@ -515,16 +515,12 @@ static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
.queue_pair_setup = virtio_crypto_qp_setup,
.queue_pair_release = virtio_crypto_qp_release,
- .queue_pair_start = NULL,
- .queue_pair_stop = NULL,
.queue_pair_count = NULL,
/* Crypto related operations */
- .session_get_size = virtio_crypto_sym_get_session_private_size,
- .session_configure = virtio_crypto_sym_configure_session,
- .session_clear = virtio_crypto_sym_clear_session,
- .qp_attach_session = NULL,
- .qp_detach_session = NULL
+ .sym_session_get_size = virtio_crypto_sym_get_session_private_size,
+ .sym_session_configure = virtio_crypto_sym_configure_session,
+ .sym_session_clear = virtio_crypto_sym_clear_session
};
static void
@@ -962,7 +958,7 @@ virtio_crypto_sym_clear_session(
hw = dev->data->dev_private;
vq = hw->cvq;
- session = (struct virtio_crypto_session *)get_session_private_data(
+ session = (struct virtio_crypto_session *)get_sym_session_private_data(
sess, cryptodev_virtio_driver_id);
if (session == NULL) {
VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
@@ -1080,7 +1076,10 @@ virtio_crypto_sym_clear_session(
VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
session->session_id);
- memset(sess, 0, sizeof(struct virtio_crypto_session));
+ memset(session, 0, sizeof(struct virtio_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(session);
+ set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL);
+ rte_mempool_put(sess_mp, session);
rte_free(malloc_virt_addr);
}
@@ -1223,6 +1222,12 @@ virtio_crypto_sym_pad_op_ctrl_req(
/* Get cipher xform from crypto xform chain */
cipher_xform = virtio_crypto_get_cipher_xform(xform);
if (cipher_xform) {
+ if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "cipher IV size cannot be longer than %u",
+ VIRTIO_CRYPTO_MAX_IV_SIZE);
+ return -1;
+ }
if (is_chainned)
ret = virtio_crypto_sym_pad_cipher_param(
&ctrl->u.sym_create_session.u.chain.para
@@ -1390,7 +1395,7 @@ virtio_crypto_sym_configure_session(
goto error_out;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
session_private);
return 0;
@@ -1409,11 +1414,10 @@ virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
if (info != NULL) {
info->driver_id = cryptodev_virtio_driver_id;
- info->pci_dev = RTE_DEV_TO_PCI(dev->device);
info->feature_flags = dev->feature_flags;
info->max_nb_queue_pairs = hw->max_dataqueues;
- info->sym.max_nb_sessions =
- RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS;
+		/* No limit on the number of sessions */
+ info->sym.max_nb_sessions = 0;
info->capabilities = hw->virtio_dev_capabilities;
}
}
@@ -1426,8 +1430,7 @@ crypto_virtio_pci_probe(
struct rte_cryptodev_pmd_init_params init_params = {
.name = "",
.socket_id = rte_socket_id(),
- .private_data_size = sizeof(struct virtio_crypto_hw),
- .max_nb_sessions = RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS
+ .private_data_size = sizeof(struct virtio_crypto_hw)
};
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
@@ -1475,9 +1478,7 @@ RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
rte_virtio_crypto_driver.driver,
cryptodev_virtio_driver_id);
-RTE_INIT(virtio_crypto_init_log);
-static void
-virtio_crypto_init_log(void)
+RTE_INIT(virtio_crypto_init_log)
{
virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
if (virtio_crypto_logtype_init >= 0)
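
The VIRTIO_CRYPTO_MAX_IV_SIZE check added to virtio_crypto_sym_pad_op_ctrl_req() guards the fixed iv[] scratch buffer that the op cookie gains below: an IV that could later be copied into the cookie must be rejected at session setup rather than overflowing at enqueue time. The guard in isolation, over stand-in types:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_IV_SIZE 16	/* mirrors VIRTIO_CRYPTO_MAX_IV_SIZE */

    struct cookie { uint8_t iv[MAX_IV_SIZE]; };	/* fixed scratch space */

    /* Returns 0 and copies, or -1 if the IV could never fit the cookie. */
    static int
    stash_iv(struct cookie *ck, const uint8_t *iv, size_t len)
    {
    	if (len > MAX_IV_SIZE)
    		return -1;	/* validate at session setup, not per packet */
    	memcpy(ck->iv, iv, len);
    	return 0;
    }

    int
    main(void)
    {
    	struct cookie ck;
    	uint8_t iv[16] = { 0 };

    	printf("16-byte IV: %d\n", stash_iv(&ck, iv, sizeof(iv)));
    	printf("oversized : %d\n", stash_iv(&ck, iv, 32));
    	return 0;
    }
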
diff --git a/drivers/crypto/virtio/virtio_cryptodev.h b/drivers/crypto/virtio/virtio_cryptodev.h
index e402c030..0fd7b722 100644
--- a/drivers/crypto/virtio/virtio_cryptodev.h
+++ b/drivers/crypto/virtio/virtio_cryptodev.h
@@ -16,6 +16,8 @@
#define NUM_ENTRY_VIRTIO_CRYPTO_OP 7
+#define VIRTIO_CRYPTO_MAX_IV_SIZE 16
+
extern uint8_t cryptodev_virtio_driver_id;
enum virtio_crypto_cmd_id {
@@ -29,6 +31,7 @@ struct virtio_crypto_op_cookie {
struct virtio_crypto_op_data_req data_req;
struct virtio_crypto_inhdr inhdr;
struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP];
+ uint8_t iv[VIRTIO_CRYPTO_MAX_IV_SIZE];
};
/*
diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c
index 45039284..e32a1ecd 100644
--- a/drivers/crypto/virtio/virtio_rxtx.c
+++ b/drivers/crypto/virtio/virtio_rxtx.c
@@ -203,9 +203,11 @@ virtqueue_crypto_sym_enqueue_xmit(
uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
uint32_t indirect_vring_addr_offset = req_data_len +
sizeof(struct virtio_crypto_inhdr);
+ uint32_t indirect_iv_addr_offset = indirect_vring_addr_offset +
+ sizeof(struct vring_desc) * NUM_ENTRY_VIRTIO_CRYPTO_OP;
struct rte_crypto_sym_op *sym_op = cop->sym;
struct virtio_crypto_session *session =
- (struct virtio_crypto_session *)get_session_private_data(
+ (struct virtio_crypto_session *)get_sym_session_private_data(
cop->sym->session, cryptodev_virtio_driver_id);
struct virtio_crypto_op_data_req *op_data_req;
uint32_t hash_result_len = 0;
@@ -259,7 +261,17 @@ virtqueue_crypto_sym_enqueue_xmit(
/* indirect vring: iv of cipher */
if (session->iv.length) {
- desc[idx].addr = cop->phys_addr + session->iv.offset;
+ if (cop->phys_addr)
+ desc[idx].addr = cop->phys_addr + session->iv.offset;
+ else {
+ rte_memcpy(crypto_op_cookie->iv,
+ rte_crypto_op_ctod_offset(cop,
+ uint8_t *, session->iv.offset),
+ session->iv.length);
+ desc[idx].addr = indirect_op_data_req_phys_addr +
+ indirect_iv_addr_offset;
+ }
+
desc[idx].len = session->iv.length;
desc[idx++].flags = VRING_DESC_F_NEXT;
}
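
virtqueue_crypto_sym_enqueue_xmit() lays the request, the inhdr, the indirect descriptor ring and now the IV copy back to back in one DMA-able cookie, addressing each piece as an offset from a single physical base; indirect_iv_addr_offset above is the newest link in that chain. The arithmetic reproduced standalone, with made-up sizes in place of the real virtio crypto struct sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in sizes; the real ones come from the virtio crypto structs. */
    #define REQ_SZ		72	/* sizeof(struct virtio_crypto_op_data_req) */
    #define INHDR_SZ	1	/* sizeof(struct virtio_crypto_inhdr) */
    #define DESC_SZ		16	/* sizeof(struct vring_desc) */
    #define NUM_DESC	7	/* NUM_ENTRY_VIRTIO_CRYPTO_OP */

    int
    main(void)
    {
    	uint64_t base = 0x100000;	/* hypothetical phys addr of the cookie */
    	uint32_t vring_off = REQ_SZ + INHDR_SZ;
    	uint32_t iv_off = vring_off + DESC_SZ * NUM_DESC;

    	/* When the op has no usable phys addr, the IV is copied into the
    	 * cookie and the descriptor points at base + iv_off instead. */
    	printf("indirect ring at %#llx, IV copy at %#llx\n",
    		(unsigned long long)(base + vring_off),
    		(unsigned long long)(base + iv_off));
    	return 0;
    }
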
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index a805b227..313f4590 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <rte_common.h>
@@ -11,7 +11,6 @@
#include <rte_cpuflags.h>
#include "rte_zuc_pmd_private.h"
-
#define ZUC_MAX_BURST 4
#define BYTE_LEN 8
@@ -78,7 +77,7 @@ zuc_set_session_parameters(struct zuc_session *sess,
break;
case ZUC_OP_NOT_SUPPORTED:
default:
- ZUC_LOG_ERR("Unsupported operation chain order parameter");
+ ZUC_LOG(ERR, "Unsupported operation chain order parameter");
return -ENOTSUP;
}
@@ -88,7 +87,7 @@ zuc_set_session_parameters(struct zuc_session *sess,
return -ENOTSUP;
if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
- ZUC_LOG_ERR("Wrong IV length");
+ ZUC_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
@@ -104,14 +103,14 @@ zuc_set_session_parameters(struct zuc_session *sess,
return -ENOTSUP;
if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
- ZUC_LOG_ERR("Wrong digest length");
+ ZUC_LOG(ERR, "Wrong digest length");
return -EINVAL;
}
sess->auth_op = auth_xform->auth.op;
if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
- ZUC_LOG_ERR("Wrong IV length");
+ ZUC_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
sess->auth_iv_offset = auth_xform->auth.iv.offset;
@@ -135,7 +134,7 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(op->sym->session != NULL))
- sess = (struct zuc_session *)get_session_private_data(
+ sess = (struct zuc_session *)get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
} else {
@@ -157,8 +156,8 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -187,7 +186,7 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
|| ((ops[i]->sym->cipher.data.offset
% BYTE_LEN) != 0)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ZUC_LOG_ERR("Data Length or offset");
+ ZUC_LOG(ERR, "Data Length or offset");
break;
}
@@ -198,7 +197,7 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
(ops[i]->sym->m_dst != NULL &&
!rte_pktmbuf_is_contiguous(
ops[i]->sym->m_dst))) {
- ZUC_LOG_ERR("PMD supports only contiguous mbufs, "
+ ZUC_LOG(ERR, "PMD supports only contiguous mbufs, "
"op (%p) provides noncontiguous mbuf as "
"source/destination buffer.\n", ops[i]);
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -246,7 +245,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ZUC_LOG_ERR("Offset");
+ ZUC_LOG(ERR, "Offset");
break;
}
@@ -461,7 +460,7 @@ cryptodev_zuc_create(const char *name,
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- ZUC_LOG_ERR("failed to create cryptodev vdev");
+ ZUC_LOG(ERR, "failed to create cryptodev vdev");
goto init_error;
}
@@ -479,11 +478,10 @@ cryptodev_zuc_create(const char *name,
internals = dev->data->dev_private;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
- ZUC_LOG_ERR("driver %s: cryptodev_zuc_create failed",
+ ZUC_LOG(ERR, "driver %s: failed",
init_params->name);
cryptodev_zuc_remove(vdev);
@@ -497,8 +495,7 @@ cryptodev_zuc_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct zuc_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -541,7 +538,11 @@ static struct cryptodev_driver zuc_crypto_drv;
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv.driver,
cryptodev_driver_id);
+
+int zuc_logtype_driver;
+
+RTE_INIT(zuc_init_log)
+{
+ zuc_logtype_driver = rte_log_register("pmd.crypto.zuc");
+}
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
index 8abac898..6da39654 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <string.h>
@@ -130,7 +130,8 @@ zuc_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+		/* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = zuc_pmd_capabilities;
}
@@ -172,13 +173,13 @@ zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- ZUC_LOG_INFO("Reusing existing ring %s"
+ ZUC_LOG(INFO, "Reusing existing ring %s"
" for processed packets",
qp->name);
return r;
}
- ZUC_LOG_ERR("Unable to reuse existing ring %s"
+ ZUC_LOG(ERR, "Unable to reuse existing ring %s"
" for processed packets",
qp->name);
return NULL;
@@ -230,22 +231,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-zuc_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-zuc_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
zuc_pmd_qp_count(struct rte_cryptodev *dev)
@@ -255,14 +240,14 @@ zuc_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the ZUC session structure */
static unsigned
-zuc_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+zuc_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct zuc_session);
}
/** Configure a ZUC session from a crypto xform chain */
static int
-zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+zuc_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -271,26 +256,27 @@ zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
int ret;
if (unlikely(sess == NULL)) {
- ZUC_LOG_ERR("invalid session struct");
+ ZUC_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
+ ZUC_LOG(ERR,
"Couldn't get object from session mempool");
+
return -ENOMEM;
}
ret = zuc_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- ZUC_LOG_ERR("failed configure session parameters");
+ ZUC_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -298,17 +284,17 @@ zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-zuc_pmd_session_clear(struct rte_cryptodev *dev,
+zuc_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct zuc_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -326,13 +312,11 @@ struct rte_cryptodev_ops zuc_pmd_ops = {
.queue_pair_setup = zuc_pmd_qp_setup,
.queue_pair_release = zuc_pmd_qp_release,
- .queue_pair_start = zuc_pmd_qp_start,
- .queue_pair_stop = zuc_pmd_qp_stop,
.queue_pair_count = zuc_pmd_qp_count,
- .session_get_size = zuc_pmd_session_get_size,
- .session_configure = zuc_pmd_session_configure,
- .session_clear = zuc_pmd_session_clear
+ .sym_session_get_size = zuc_pmd_sym_session_get_size,
+ .sym_session_configure = zuc_pmd_sym_session_configure,
+ .sym_session_clear = zuc_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops;
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_private.h b/drivers/crypto/zuc/rte_zuc_pmd_private.h
index b83c4a04..5e5906dd 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_private.h
+++ b/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#ifndef _RTE_ZUC_PMD_PRIVATE_H_
@@ -10,25 +10,12 @@
#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
/**< KASUMI PMD device name */
-#define ZUC_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_ZUC_DEBUG
-#define ZUC_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
- __func__, __LINE__, ## args)
-
-#define ZUC_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define ZUC_LOG_INFO(fmt, args...)
-#define ZUC_LOG_DBG(fmt, args...)
-#endif
+/** ZUC PMD logtype (driver). */
+extern int zuc_logtype_driver;
+
+#define ZUC_LOG(level, fmt, ...) \
+	rte_log(RTE_LOG_ ## level, zuc_logtype_driver, \
+		"%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
#define ZUC_IV_KEY_LENGTH 16
#define ZUC_DIGEST_LENGTH 4
@@ -37,8 +24,6 @@
struct zuc_private {
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
};
/** ZUC buffer queue pair */
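
ZUC_LOG is written with ## __VA_ARGS__ rather than the named args... form used by the other drivers in this patch; the ## is what lets a call with no variadic arguments, such as ZUC_LOG(ERR, "Offset"), compile by deleting the trailing comma. A two-macro demonstration of the GNU extension, as used throughout these drivers:

    #include <stdio.h>

    /* GNU extension: "## __VA_ARGS__" drops the comma when nothing follows fmt. */
    #define LOG_OK(fmt, ...)  fprintf(stderr, "log: " fmt "\n", ## __VA_ARGS__)
    /* Without ##, LOG_OK("x") would expand to fprintf(..., "x\n", ) -> error. */

    int
    main(void)
    {
    	LOG_OK("no varargs at all");
    	LOG_OK("with one: %d", 7);
    	return 0;
    }
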
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index cd801bfb..ea1e5cc6 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -822,9 +822,7 @@ static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
-RTE_INIT(dpaa2_eventdev_init_log);
-static void
-dpaa2_eventdev_init_log(void)
+RTE_INIT(dpaa2_eventdev_init_log)
{
dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
if (dpaa2_logtype_event >= 0)
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
index 48f1abd1..a2c2060c 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -16,7 +16,7 @@ extern int dpaa2_logtype_event;
rte_log(RTE_LOG_DEBUG, dpaa2_logtype_event, "dpaa2_event: %s(): " \
fmt "\n", __func__, ##args)
-#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_LOG(DEBUG, " >>")
+#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_DEBUG(" >>")
#define DPAA2_EVENTDEV_INFO(fmt, args...) \
DPAA2_EVENTDEV_LOG(INFO, fmt, ## args)
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 2df70b52..16a3a04b 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -23,9 +23,7 @@
int otx_logtype_ssovf;
static uint8_t timvf_enable_stats;
-RTE_INIT(otx_ssovf_init_log);
-static void
-otx_ssovf_init_log(void)
+RTE_INIT(otx_ssovf_init_log)
{
otx_logtype_ssovf = rte_log_register("pmd.event.octeontx");
if (otx_logtype_ssovf >= 0)
@@ -476,14 +474,9 @@ static int
ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev)
{
- int ret;
- const struct octeontx_nic *nic = eth_dev->data->dev_private;
RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
- ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
- if (ret)
- return 0;
- octeontx_pki_port_start(nic->port_id);
return 0;
}
@@ -492,14 +485,9 @@ static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev)
{
- int ret;
- const struct octeontx_nic *nic = eth_dev->data->dev_private;
RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
- ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
- if (ret)
- return 0;
- octeontx_pki_port_stop(nic->port_id);
return 0;
}
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index d8bbc714..fffa9024 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -204,6 +204,8 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id,
uint32_t reg_off;
struct rte_event ev;
uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
+ uint64_t get_work0, get_work1;
+ uint64_t sched_type_queue;
uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
enable = ssovf_read64(base + SSO_VHGRP_QCTL);
@@ -219,7 +221,20 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id,
cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
/* Extract cq and ds count */
cq_ds_cnt &= 0x1FFF1FFF0000;
- ssows_get_work(ws, &ev);
+
+ ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
+
+ sched_type_queue = (get_work0 >> 32) & 0xfff;
+ ws->cur_tt = sched_type_queue & 0x3;
+ ws->cur_grp = sched_type_queue >> 2;
+ sched_type_queue = sched_type_queue << 38;
+ ev.event = sched_type_queue | (get_work0 & 0xffffffff);
+ if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
+ ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
+ (ev.event >> 20) & 0x7F);
+ else
+ ev.u64 = get_work1;
+
if (fn != NULL && ev.u64 != 0)
fn(arg, ev);
}
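
ssows_flush_events() now open-codes the get_work decode instead of calling ssows_get_work(), so the flush path only converts the WQE to an mbuf for ethdev events. The bit-field arithmetic it performs on the first 64-bit word, reproduced standalone — field positions are taken from the hunk above, the sample value is invented:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
    	uint64_t get_work0 = 0x00000acedeadbeefULL;	/* sample HW word */
    	uint64_t sched_type_queue, event;
    	uint16_t cur_grp;
    	uint8_t cur_tt;

    	sched_type_queue = (get_work0 >> 32) & 0xfff;	/* 12-bit field */
    	cur_tt = sched_type_queue & 0x3;	/* low 2 bits: sched type */
    	cur_grp = sched_type_queue >> 2;	/* remaining 10 bits: group */

    	/* Rebuild the rte_event word: type/queue land in bits 63:38,
    	 * the low 32 bits of get_work0 carry flow id and friends. */
    	event = (sched_type_queue << 38) | (get_work0 & 0xffffffff);

    	printf("tt=%u grp=%u event=%#llx\n", cur_tt, cur_grp,
    		(unsigned long long)event);
    	return 0;
    }
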
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index d55018a9..7c7306b5 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -28,11 +28,11 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
{
struct rte_mbuf *mbuf;
octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
- rte_prefetch_non_temporal(wqe);
/* Get mbuf from wqe */
mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
OCTTX_PACKET_WQE_SKIP);
+ rte_prefetch_non_temporal(mbuf);
mbuf->packet_type =
ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index c4fbd2d8..abbc9a77 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -6,9 +6,7 @@
int otx_logtype_timvf;
-RTE_INIT(otx_timvf_init_log);
-static void
-otx_timvf_init_log(void)
+RTE_INIT(otx_timvf_init_log)
{
otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
if (otx_logtype_timvf >= 0)
@@ -174,7 +172,7 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
if (use_fpa) {
pool = (uintptr_t)((struct rte_mempool *)
timr->chunk_pool)->pool_id;
- ret = octeontx_fpa_bufpool_gpool(pool);
+ ret = octeontx_fpa_bufpool_gaura(pool);
if (ret < 0) {
timvf_log_dbg("Unable to get gaura id");
ret = -ENOMEM;
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index ef9fb30c..a4f0bc8b 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -753,10 +753,7 @@ static struct rte_vdev_driver evdev_opdl_pmd_drv = {
.remove = opdl_remove
};
-RTE_INIT(opdl_init_log);
-
-static void
-opdl_init_log(void)
+RTE_INIT(opdl_init_log)
{
opdl_logtype_driver = rte_log_register("pmd.event.opdl.driver");
if (opdl_logtype_driver >= 0)
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 10f0e1ad..a6bb9138 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -361,9 +361,99 @@ sw_init_qid_iqs(struct sw_evdev *sw)
}
}
+static int
+sw_qids_empty(struct sw_evdev *sw)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < sw->qid_count; i++) {
+ for (j = 0; j < SW_IQS_MAX; j++) {
+ if (iq_count(&sw->qids[i].iq[j]))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int
+sw_ports_empty(struct sw_evdev *sw)
+{
+ unsigned int i;
+
+ for (i = 0; i < sw->port_count; i++) {
+ if ((rte_event_ring_count(sw->ports[i].rx_worker_ring)) ||
+ rte_event_ring_count(sw->ports[i].cq_worker_ring))
+ return 0;
+ }
+
+ return 1;
+}
+
static void
-sw_clean_qid_iqs(struct sw_evdev *sw)
+sw_drain_ports(struct rte_eventdev *dev)
{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ eventdev_stop_flush_t flush;
+ unsigned int i;
+ uint8_t dev_id;
+ void *arg;
+
+ flush = dev->dev_ops->dev_stop_flush;
+ dev_id = dev->data->dev_id;
+ arg = dev->data->dev_stop_flush_arg;
+
+ for (i = 0; i < sw->port_count; i++) {
+ struct rte_event ev;
+
+ while (rte_event_dequeue_burst(dev_id, i, &ev, 1, 0)) {
+ if (flush)
+ flush(dev_id, ev, arg);
+
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev_id, i, &ev, 1);
+ }
+ }
+}
+
+static void
+sw_drain_queue(struct rte_eventdev *dev, struct sw_iq *iq)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ eventdev_stop_flush_t flush;
+ uint8_t dev_id;
+ void *arg;
+
+ flush = dev->dev_ops->dev_stop_flush;
+ dev_id = dev->data->dev_id;
+ arg = dev->data->dev_stop_flush_arg;
+
+ while (iq_count(iq) > 0) {
+ struct rte_event ev;
+
+ iq_dequeue_burst(sw, iq, &ev, 1);
+
+ if (flush)
+ flush(dev_id, ev, arg);
+ }
+}
+
+static void
+sw_drain_queues(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ unsigned int i, j;
+
+ for (i = 0; i < sw->qid_count; i++) {
+ for (j = 0; j < SW_IQS_MAX; j++)
+ sw_drain_queue(dev, &sw->qids[i].iq[j]);
+ }
+}
+
+static void
+sw_clean_qid_iqs(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
int i, j;
/* Release the IQ memory of all configured qids */
@@ -729,10 +819,30 @@ static void
sw_stop(struct rte_eventdev *dev)
{
struct sw_evdev *sw = sw_pmd_priv(dev);
- sw_clean_qid_iqs(sw);
+ int32_t runstate;
+
+ /* Stop the scheduler if it's running */
+ runstate = rte_service_runstate_get(sw->service_id);
+ if (runstate == 1)
+ rte_service_runstate_set(sw->service_id, 0);
+
+ while (rte_service_may_be_active(sw->service_id))
+ rte_pause();
+
+ /* Flush all events out of the device */
+ while (!(sw_qids_empty(sw) && sw_ports_empty(sw))) {
+ sw_event_schedule(dev);
+ sw_drain_ports(dev);
+ sw_drain_queues(dev);
+ }
+
+ sw_clean_qid_iqs(dev);
sw_xstats_uninit(sw);
sw->started = 0;
rte_smp_wmb();
+
+ if (runstate == 1)
+ rte_service_runstate_set(sw->service_id, 1);
}
static int
@@ -964,9 +1074,7 @@ RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
/* declared extern in header, for access from other .c files */
int eventdev_sw_log_level;
-RTE_INIT(evdev_sw_init_log);
-static void
-evdev_sw_init_log(void)
+RTE_INIT(evdev_sw_init_log)
{
eventdev_sw_log_level = rte_log_register("pmd.event.sw");
if (eventdev_sw_log_level >= 0)
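
The reworked sw_stop() above is a quiesce-then-drain loop: park the scheduler service, then alternate scheduling passes with port and queue drains until both report empty, handing each flushed event to the application's dev_stop_flush callback. The control flow modelled with stub predicates in place of the real ring and IQ counters — everything here is a stand-in, not the PMD's API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for device state; a real PMD reads ring/IQ counts. */
    static int pending = 5;

    static bool qids_empty(void)  { return pending == 0; }
    static bool ports_empty(void) { return pending == 0; }
    static void schedule_pass(void) { /* move events ports <-> queues */ }

    typedef void (*stop_flush_t)(int ev, void *arg);

    static void
    drain(stop_flush_t flush, void *arg)
    {
    	if (pending > 0) {
    		pending--;
    		if (flush)
    			flush(pending, arg);	/* surface the event to the app */
    	}
    }

    static void
    count_cb(int ev, void *arg)
    {
    	(void)ev;
    	(*(int *)arg)++;
    }

    int
    main(void)
    {
    	int flushed = 0;

    	/* Service already parked; loop until nothing is in flight. */
    	while (!(qids_empty() && ports_empty())) {
    		schedule_pass();
    		drain(count_cb, &flushed);
    	}
    	printf("flushed %d events\n", flushed);
    	return 0;
    }
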
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index 78d30e07..c40912db 100644
--- a/drivers/event/sw/sw_evdev_selftest.c
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -28,6 +28,7 @@
#define MAX_PORTS 16
#define MAX_QIDS 16
#define NUM_PACKETS (1<<18)
+#define DEQUEUE_DEPTH 128
static int evdev;
@@ -147,7 +148,7 @@ init(struct test *t, int nb_queues, int nb_ports)
.nb_event_ports = nb_ports,
.nb_event_queue_flows = 1024,
.nb_events_limit = 4096,
- .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_dequeue_depth = DEQUEUE_DEPTH,
.nb_event_port_enqueue_depth = 128,
};
int ret;
@@ -2807,6 +2808,78 @@ err:
return -1;
}
+static void
+flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg)
+{
+ *((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0;
+}
+
+static int
+dev_stop_flush(struct test *t) /* test to check we can properly flush events */
+{
+ const struct rte_event new_ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .u64 = 0xCA11BACC,
+ .queue_id = 0
+ };
+ struct rte_event ev = new_ev;
+ uint8_t count = 0;
+ int i;
+
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* Link the queue so *_start() doesn't error out */
+ if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) {
+ printf("%d: Error linking queue to port\n", __LINE__);
+ goto err;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto err;
+ }
+
+ for (i = 0; i < DEQUEUE_DEPTH + 1; i++) {
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error enqueuing events\n", __LINE__);
+ goto err;
+ }
+ }
+
+ /* Schedule the events from the port to the IQ. At least one event
+ * should be remaining in the queue.
+ */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) {
+ printf("%d: Error installing the flush callback\n", __LINE__);
+ goto err;
+ }
+
+ cleanup(t);
+
+ if (count == 0) {
+ printf("%d: Error executing the flush callback\n", __LINE__);
+ goto err;
+ }
+
+ if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
+ printf("%d: Error uninstalling the flush callback\n", __LINE__);
+ goto err;
+ }
+
+ return 0;
+err:
+ rte_event_dev_dump(evdev, stdout);
+ cleanup(t);
+ return -1;
+}
+
static int
worker_loopback_worker_fn(void *arg)
{
@@ -3211,6 +3284,12 @@ test_sw_eventdev(void)
printf("ERROR - Head-of-line-blocking test FAILED.\n");
goto test_fail;
}
+ printf("*** Running Stop Flush test...\n");
+ ret = dev_stop_flush(t);
+ if (ret != 0) {
+ printf("ERROR - Stop Flush test FAILED.\n");
+ goto test_fail;
+ }
if (rte_lcore_count() >= 3) {
printf("*** Running Worker loopback test...\n");
ret = worker_loopback(t, 0);
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index e12a0ec8..7d0435f5 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -442,9 +442,7 @@ struct rte_mempool_ops dpaa2_mpool_ops = {
MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
-RTE_INIT(dpaa2_mempool_init_log);
-static void
-dpaa2_mempool_init_log(void)
+RTE_INIT(dpaa2_mempool_init_log)
{
dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
if (dpaa2_logtype_mempool >= 0)
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 7aecaa85..4cf387e8 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -108,9 +108,7 @@ static struct octeontx_fpadev fpadev;
int octeontx_logtype_fpavf;
int octeontx_logtype_fpavf_mbox;
-RTE_INIT(otx_pool_init_log);
-static void
-otx_pool_init_log(void)
+RTE_INIT(otx_pool_init_log)
{
octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
if (octeontx_logtype_fpavf >= 0)
@@ -243,7 +241,7 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
POOL_ENA;
- cfg.aid = 0;
+ cfg.aid = FPA_AURA_IDX(gpool);
cfg.pool_cfg = reg;
cfg.pool_stack_base = phys_addr;
cfg.pool_stack_end = phys_addr + memsz;
@@ -327,7 +325,7 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
hdr.vfid = gpool_index;
hdr.res_code = 0;
memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
- cfg.aid = gpool_index; /* gpool is guara */
+ cfg.aid = FPA_AURA_IDX(gpool_index);
ret = octeontx_mbox_send(&hdr, &cfg,
sizeof(struct octeontx_mbox_fpa_cfg),
@@ -335,7 +333,8 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
if (ret < 0) {
fpavf_log_err("Could not attach fpa ");
fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
- gpool_index, gpool_index, ret, hdr.res_code);
+ FPA_AURA_IDX(gpool_index), gpool_index, ret,
+ hdr.res_code);
ret = -EACCES;
goto err;
}
@@ -355,14 +354,15 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
goto err;
}
- cfg.aid = gpool_index; /* gpool is gaura */
+ cfg.aid = FPA_AURA_IDX(gpool_index);
hdr.coproc = FPA_COPROC;
hdr.msg = FPA_DETACHAURA;
hdr.vfid = gpool_index;
ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
if (ret < 0) {
fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
- gpool_index, ret, hdr.res_code);
+ FPA_AURA_IDX(gpool_index), ret,
+ hdr.res_code);
ret = -EINVAL;
}
@@ -469,6 +469,7 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
{
uint64_t cnt, limit, avail;
uint8_t gpool;
+ uint16_t gaura;
uintptr_t pool_bar;
if (unlikely(!octeontx_fpa_handle_valid(handle)))
@@ -476,14 +477,16 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
/* get the gpool */
gpool = octeontx_fpa_bufpool_gpool(handle);
+ /* get the aura */
+ gaura = octeontx_fpa_bufpool_gaura(handle);
/* Get pool bar address from handle */
pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT(gpool)));
+ FPA_VF_VHAURA_CNT(gaura)));
limit = fpavf_read64((void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
avail = fpavf_read64((void *)((uintptr_t)pool_bar +
FPA_VF_VHPOOL_AVAILABLE(gpool)));
@@ -496,6 +499,7 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
unsigned int buf_offset, int node_id)
{
unsigned int gpool;
+ unsigned int gaura;
uintptr_t gpool_handle;
uintptr_t pool_bar;
int res;
@@ -545,16 +549,18 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
goto error_pool_destroy;
}
+ gaura = FPA_AURA_IDX(gpool);
+
/* Release lock */
rte_spinlock_unlock(&fpadev.lock);
/* populate AURA registers */
fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT(gpool)));
+ FPA_VF_VHAURA_CNT(gaura)));
fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
octeontx_fpapf_start_count(gpool);
@@ -581,6 +587,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
uint64_t sz;
uint64_t cnt, avail;
uint8_t gpool;
+ uint16_t gaura;
uintptr_t pool_bar;
int ret;
@@ -594,13 +601,15 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* get the pool */
gpool = octeontx_fpa_bufpool_gpool(handle);
+ /* get the aura */
+ gaura = octeontx_fpa_bufpool_gaura(handle);
/* Get pool bar address from handle */
pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
/* Check for no outstanding buffers */
cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT(gpool)));
+ FPA_VF_VHAURA_CNT(gaura)));
if (cnt) {
fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt);
return -EBUSY;
@@ -613,9 +622,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* Prepare to empty the entire POOL */
fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
/* Empty the pool */
/* Invalidate the POOL */
@@ -627,11 +636,11 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* Yank a buffer from the pool */
node = (void *)(uintptr_t)
fpavf_read64((void *)
- (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));
+ (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));
if (node == NULL) {
fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
- gpool, avail);
+ gaura, avail);
break;
}
@@ -665,9 +674,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* Deactivate the AURA */
fpavf_write64(0, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
fpavf_write64(0, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
ret = octeontx_fpapf_aura_detach(gpool);
if (ret) {
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index b76f40e7..b00be137 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -14,6 +14,7 @@
#define FPA_VF_MAX 32
#define FPA_GPOOL_MASK (FPA_VF_MAX-1)
+#define FPA_GAURA_SHIFT 4
/* FPA VF register offsets */
#define FPA_VF_INT(x) (0x200ULL | ((x) << 22))
@@ -36,6 +37,7 @@
#define FPA_VF_FREE_ADDRS_S(x, y, z) \
((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14))
+#define FPA_AURA_IDX(gpool) ((gpool) << FPA_GAURA_SHIFT)
/* FPA VF register offsets from VF_BAR4, size 2 MByte */
#define FPA_VF_MSIX_VEC_ADDR 0x00000
#define FPA_VF_MSIX_VEC_CTL 0x00008
@@ -102,4 +104,11 @@ octeontx_fpa_bufpool_gpool(uintptr_t handle)
{
return (uint8_t)handle & FPA_GPOOL_MASK;
}
+
+static __rte_always_inline uint16_t
+octeontx_fpa_bufpool_gaura(uintptr_t handle)
+{
+ return octeontx_fpa_bufpool_gpool(handle) << FPA_GAURA_SHIFT;
+}
+
#endif /* __OCTEONTX_FPAVF_H__ */
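
The header diff above encodes the core of this fix: an FPA pool handle keeps the global pool id (gpool) in its low bits, while the aura index (gaura) used for the VHAURA_* registers is gpool shifted left by FPA_GAURA_SHIFT, so aura-scoped registers must no longer be indexed with the raw pool id. A minimal standalone sketch of the mapping, reusing only the constants visible in this diff; the handle value and print-out are illustrative, not driver code:

/* Sketch of the gpool -> gaura mapping introduced above; the macros mirror
 * octeontx_fpavf.h, everything else is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define FPA_VF_MAX      32
#define FPA_GPOOL_MASK  (FPA_VF_MAX - 1)
#define FPA_GAURA_SHIFT 4
#define FPA_AURA_IDX(gpool) ((gpool) << FPA_GAURA_SHIFT)

static uint8_t bufpool_gpool(uintptr_t handle)
{
	return (uint8_t)handle & FPA_GPOOL_MASK;
}

static uint16_t bufpool_gaura(uintptr_t handle)
{
	return FPA_AURA_IDX(bufpool_gpool(handle));
}

int main(void)
{
	uintptr_t handle = 0x25; /* hypothetical handle, low bits give gpool = 5 */

	/* Pool-scoped registers (e.g. VHPOOL_AVAILABLE) keep using gpool;
	 * aura-scoped ones (VHAURA_CNT, CNT_LIMIT, OP_ALLOC, ...) now use
	 * gaura = gpool << 4, i.e. 5 -> 80 here.
	 */
	printf("gpool=%u gaura=%u\n", bufpool_gpool(handle),
	       (unsigned int)bufpool_gaura(handle));
	return 0;
}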
diff --git a/drivers/meson.build b/drivers/meson.build
index ac6c9729..f94e2fe6 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -11,6 +11,10 @@ driver_classes = ['common',
'event', # depends on common, bus, mempool and net.
'raw'] # depends on common, bus, mempool, net and event.
+default_cflags = machine_args
+if cc.has_argument('-Wno-format-truncation')
+ default_cflags += '-Wno-format-truncation'
+endif
foreach class:driver_classes
drivers = []
std_deps = []
@@ -31,7 +35,7 @@ foreach class:driver_classes
allow_experimental_apis = false
sources = []
objs = []
- cflags = machine_args
+ cflags = default_cflags
includes = [include_directories(drv_path)]
# set up internal deps. Drivers can append/override as necessary
deps = std_deps
@@ -58,6 +62,10 @@ foreach class:driver_classes
shared_objs = []
static_objs = []
foreach d:deps
+ if not is_variable('shared_rte_' + d)
+ error('Missing dependency ' + d +
+ ' for driver ' + lib_name)
+ endif
shared_objs += [get_variable('shared_rte_' + d)]
static_objs += [get_variable('static_rte_' + d)]
endforeach
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 9f9da665..664398de 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -33,6 +33,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
DIRS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mvpp2
+DIRS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += netvsc
DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null
@@ -59,7 +60,7 @@ endif # $(CONFIG_RTE_LIBRTE_SCHED)
ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost
ifeq ($(CONFIG_RTE_EAL_VFIO),y)
-DIRS-$(CONFIG_RTE_LIBRTE_IFCVF_VDPA_PMD) += ifc
+DIRS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifc
endif
endif # $(CONFIG_RTE_LIBRTE_VHOST)
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index ea47abbf..eb3cce3a 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -305,6 +305,7 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -935,6 +936,7 @@ rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
}
/* TODO: request info from primary to set up Rx and Tx */
eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -1015,9 +1017,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
"framecnt=<int> "
"qdisc_bypass=<0|1>");
-RTE_INIT(af_packet_init_log);
-static void
-af_packet_init_log(void)
+RTE_INIT(af_packet_init_log)
{
af_packet_logtype = rte_log_register("pmd.net.packet");
if (af_packet_logtype >= 0)
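
The af_packet hunk above (and the matching hunks in the avf, avp, and axgbe drivers later in this patch) reflects an RTE_INIT API change in this release: the macro now emits the constructor function's declaration itself, so the separate forward declaration plus "static void" definition collapse into a single construct. A hedged sketch of the new pattern; the function name and log name are placeholders, not part of the patch:

#include <rte_common.h>
#include <rte_log.h>

static int my_pmd_logtype; /* illustrative log type variable */

/* New-style RTE_INIT: no separate prototype line is needed anymore. */
RTE_INIT(my_pmd_init_log)
{
	my_pmd_logtype = rte_log_register("pmd.net.example");
	if (my_pmd_logtype >= 0)
		rte_log_set_level(my_pmd_logtype, RTE_LOG_NOTICE);
}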
diff --git a/drivers/net/ark/Makefile b/drivers/net/ark/Makefile
index f1433bd2..2e232be8 100644
--- a/drivers/net/ark/Makefile
+++ b/drivers/net/ark/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright (c) 2015-2017 Atomic Rules LLC
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2015-2018 Atomic Rules LLC
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/ark/ark_ddm.c b/drivers/net/ark/ark_ddm.c
index 929dc7d1..eea388a1 100644
--- a/drivers/net/ark/ark_ddm.c
+++ b/drivers/net/ark/ark_ddm.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_ddm.h b/drivers/net/ark/ark_ddm.h
index f67ad012..b37d1e09 100644
--- a/drivers/net/ark/ark_ddm.h
+++ b/drivers/net/ark/ark_ddm.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_DDM_H_
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 834d8a9e..552ca01a 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
index 987d085e..16f0d11e 100644
--- a/drivers/net/ark/ark_ethdev_rx.c
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_ethdev_rx.h b/drivers/net/ark/ark_ethdev_rx.h
index 14678711..0fdd29b1 100644
--- a/drivers/net/ark/ark_ethdev_rx.h
+++ b/drivers/net/ark/ark_ethdev_rx.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_ETHDEV_RX_H_
diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
index 4ef55d10..57188c24 100644
--- a/drivers/net/ark/ark_ethdev_tx.c
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_ethdev_tx.h b/drivers/net/ark/ark_ethdev_tx.h
index 657f895a..e448ce22 100644
--- a/drivers/net/ark/ark_ethdev_tx.h
+++ b/drivers/net/ark/ark_ethdev_tx.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_ETHDEV_TX_H_
diff --git a/drivers/net/ark/ark_ext.h b/drivers/net/ark/ark_ext.h
index 031cfddc..f5af2153 100644
--- a/drivers/net/ark/ark_ext.h
+++ b/drivers/net/ark/ark_ext.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_EXT_H_
diff --git a/drivers/net/ark/ark_global.h b/drivers/net/ark/ark_global.h
index 41eb260e..f820091d 100644
--- a/drivers/net/ark/ark_global.h
+++ b/drivers/net/ark/ark_global.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_GLOBAL_H_
diff --git a/drivers/net/ark/ark_logs.h b/drivers/net/ark/ark_logs.h
index 8aff2963..b90e9f0a 100644
--- a/drivers/net/ark/ark_logs.h
+++ b/drivers/net/ark/ark_logs.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_DEBUG_H_
diff --git a/drivers/net/ark/ark_mpu.c b/drivers/net/ark/ark_mpu.c
index d4ba6dc7..21f840f3 100644
--- a/drivers/net/ark/ark_mpu.c
+++ b/drivers/net/ark/ark_mpu.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_mpu.h b/drivers/net/ark/ark_mpu.h
index f6f6c808..92c3e67c 100644
--- a/drivers/net/ark/ark_mpu.h
+++ b/drivers/net/ark/ark_mpu.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_MPU_H_
diff --git a/drivers/net/ark/ark_pktchkr.c b/drivers/net/ark/ark_pktchkr.c
index 2cadaab8..c21003a0 100644
--- a/drivers/net/ark/ark_pktchkr.c
+++ b/drivers/net/ark/ark_pktchkr.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <getopt.h>
diff --git a/drivers/net/ark/ark_pktchkr.h b/drivers/net/ark/ark_pktchkr.h
index f4025dd6..a50f428b 100644
--- a/drivers/net/ark/ark_pktchkr.h
+++ b/drivers/net/ark/ark_pktchkr.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_PKTCHKR_H_
diff --git a/drivers/net/ark/ark_pktdir.c b/drivers/net/ark/ark_pktdir.c
index eb47dedb..1f2c8182 100644
--- a/drivers/net/ark/ark_pktdir.c
+++ b/drivers/net/ark/ark_pktdir.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <stdint.h>
diff --git a/drivers/net/ark/ark_pktdir.h b/drivers/net/ark/ark_pktdir.h
index e13fe821..314e6dea 100644
--- a/drivers/net/ark/ark_pktdir.h
+++ b/drivers/net/ark/ark_pktdir.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_PKTDIR_H_
diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c
index d3c3dee1..2a2b428e 100644
--- a/drivers/net/ark/ark_pktgen.c
+++ b/drivers/net/ark/ark_pktgen.c
@@ -1,34 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <getopt.h>
diff --git a/drivers/net/ark/ark_pktgen.h b/drivers/net/ark/ark_pktgen.h
index bf5a241b..0e5f76aa 100644
--- a/drivers/net/ark/ark_pktgen.h
+++ b/drivers/net/ark/ark_pktgen.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_PKTGEN_H_
diff --git a/drivers/net/ark/ark_rqp.c b/drivers/net/ark/ark_rqp.c
index 41c497b0..bf1af4d6 100644
--- a/drivers/net/ark/ark_rqp.c
+++ b/drivers/net/ark/ark_rqp.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_rqp.h b/drivers/net/ark/ark_rqp.h
index 0c380071..6c804606 100644
--- a/drivers/net/ark/ark_rqp.h
+++ b/drivers/net/ark/ark_rqp.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_RQP_H_
diff --git a/drivers/net/ark/ark_udm.c b/drivers/net/ark/ark_udm.c
index 7a429ac7..03f1922c 100644
--- a/drivers/net/ark/ark_udm.c
+++ b/drivers/net/ark/ark_udm.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_udm.h b/drivers/net/ark/ark_udm.h
index 915343fe..5846c825 100644
--- a/drivers/net/ark/ark_udm.h
+++ b/drivers/net/ark/ark_udm.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_UDM_H_
diff --git a/drivers/net/ark/meson.build b/drivers/net/ark/meson.build
new file mode 100644
index 00000000..99151bba
--- /dev/null
+++ b/drivers/net/ark/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('ark_ddm.c',
+ 'ark_ethdev.c',
+ 'ark_ethdev_rx.c',
+ 'ark_ethdev_tx.c',
+ 'ark_mpu.c',
+ 'ark_pktchkr.c',
+ 'ark_pktdir.c',
+ 'ark_pktgen.c',
+ 'ark_rqp.c',
+ 'ark_udm.c')
diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c
index ad83a57e..3a2baaf2 100644
--- a/drivers/net/avf/avf_ethdev.c
+++ b/drivers/net/avf/avf_ethdev.c
@@ -518,16 +518,30 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
@@ -1343,9 +1357,7 @@ static struct rte_pci_driver rte_avf_pmd = {
RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");
-RTE_INIT(avf_init_log);
-static void
-avf_init_log(void)
+RTE_INIT(avf_init_log)
{
avf_logtype_init = rte_log_register("pmd.net.avf.init");
if (avf_logtype_init >= 0)
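
The avf hunk above widens the per-port offload capability masks the PMD advertises. Applications are expected to consult these masks before requesting an offload in rte_eth_conf; a minimal sketch of that check, with an illustrative function name and offload choice (not part of the patch):

#include <errno.h>
#include <rte_ethdev.h>

/* Sketch: gate an offload request on the advertised capability mask. */
static int enable_tso_if_supported(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0)
		return -ENOTSUP; /* this PMD does not advertise TSO */

	conf->txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
	return 0;
}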
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index dc97e60e..761f6c1c 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -383,7 +383,7 @@ avp_dev_translate_address(struct rte_eth_dev *eth_dev,
(host_phys_addr < (map->phys_addr + map->length))) {
/* address is within this segment */
offset += (host_phys_addr - map->phys_addr);
- addr = RTE_PTR_ADD(addr, offset);
+ addr = RTE_PTR_ADD(addr, (uintptr_t)offset);
PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
host_phys_addr, addr);
@@ -2170,6 +2170,7 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
}
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -2270,9 +2271,7 @@ avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
-RTE_INIT(avp_init_log);
-static void
-avp_init_log(void)
+RTE_INIT(avp_init_log)
{
avp_logtype_driver = rte_log_register("pmd.net.avp.driver");
if (avp_logtype_driver >= 0)
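
A plausible motivation for the (uintptr_t) cast added in the avp translation hunk above: RTE_PTR_ADD expands to (void *)((uintptr_t)(ptr) + (x)), so on a 32-bit build a uint64_t offset widens the sum to 64 bits and the final pointer cast then truncates, which compilers warn about. Narrowing the offset explicitly documents that it is known to fit in a pointer. A self-contained sketch with a local macro mirroring RTE_PTR_ADD; names here are illustrative:

#include <stdint.h>

#define PTR_ADD(ptr, x) ((void *)((uintptr_t)(ptr) + (x))) /* as RTE_PTR_ADD */

static void *translate(void *base, uint64_t host_off)
{
	return PTR_ADD(base, (uintptr_t)host_off); /* explicit narrowing */
}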
diff --git a/drivers/net/avp/meson.build b/drivers/net/avp/meson.build
new file mode 100644
index 00000000..6076c31b
--- /dev/null
+++ b/drivers/net/avp/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('avp_ethdev.c')
+install_headers('rte_avp_common.h', 'rte_avp_fifo.h')
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 7a3ba2e7..9ae9f063 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -363,7 +363,9 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -385,8 +387,6 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = AXGBE_TX_FREE_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
};
}
@@ -759,9 +759,7 @@ RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(axgbe_init_log);
-static void
-axgbe_init_log(void)
+RTE_INIT(axgbe_init_log)
{
axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
if (axgbe_logtype_init >= 0)
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index b302bdd1..c5fd5f41 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -74,8 +74,10 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
(DMA_CH_INC * rxq->queue_id));
rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
DMA_CH_RDTR_LO);
- rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
/* CRC stripping in AXGBE is a per-port, not per-queue, setting */
pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
@@ -369,10 +371,8 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (txq->nb_desc % txq->free_thresh != 0)
txq->vector_disable = 1;
- if ((tx_conf->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOOFFLOADS) !=
- ETH_TXQ_FLAGS_NOOFFLOADS) {
+ if (tx_conf->offloads != 0)
txq->vector_disable = 1;
- }
/* Allocate TX ring hardware descriptors */
tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
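
The axgbe Rx hunk above replaces the open-coded DEV_RX_OFFLOAD_CRC_STRIP test with the rte_eth_dev_must_keep_crc() helper that accompanies the new DEV_RX_OFFLOAD_KEEP_CRC flag, deriving the queue's crc_len from whether the application asked to keep the frame check sequence. The same decision in isolation, as a hedged sketch with an illustrative function name:

#include <rte_ethdev.h>

/* Sketch: per-queue CRC length selection as in the axgbe hunk above. */
static uint8_t rx_crc_len(uint64_t rx_offloads)
{
	/* Keep the 4-byte FCS in the mbuf only when the application asked
	 * for DEV_RX_OFFLOAD_KEEP_CRC; otherwise report it as stripped.
	 */
	if (rte_eth_dev_must_keep_crc(rx_offloads))
		return ETHER_CRC_LEN;
	return 0;
}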
diff --git a/drivers/net/bnx2x/LICENSE.bnx2x_pmd b/drivers/net/bnx2x/LICENSE.bnx2x_pmd
deleted file mode 100644
index 64c6ef2c..00000000
--- a/drivers/net/bnx2x/LICENSE.bnx2x_pmd
+++ /dev/null
@@ -1,3 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2014-2018 Cavium Inc.
- */
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index 150b4cfa..55d1ad6e 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -1,8 +1,7 @@
-# Copyright (c) 2014 - 2018 Cavium Inc.
-# All rights reserved.
-# www.cavium.com
-#
-# See LICENSE.bnx2x_pmd for copyright and licensing details.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2014 - 2018 Cavium Inc.
+# All rights reserved.
+# www.cavium.com
include $(RTE_SDK)/mk/rte.vars.mk
#
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 84ade5fb..4904eaf3 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -1,4 +1,4 @@
-/*-
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
@@ -9,8 +9,6 @@
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#define BNX2X_DRIVER_VERSION "1.78.18"
@@ -31,7 +29,7 @@
#define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
#define BNX2X_PMD_VERSION_MAJOR 1
#define BNX2X_PMD_VERSION_MINOR 0
-#define BNX2X_PMD_VERSION_REVISION 5
+#define BNX2X_PMD_VERSION_REVISION 6
#define BNX2X_PMD_VERSION_PATCH 1
static inline const char *
@@ -125,7 +123,6 @@ int bnx2x_nic_load(struct bnx2x_softc *sc);
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp);
-static void bnx2x_periodic_stop(struct bnx2x_softc *sc);
static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
uint8_t storm, uint16_t index, uint8_t op,
uint8_t update);
@@ -1969,9 +1966,6 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
PMD_DRV_LOG(DEBUG, "Starting NIC unload...");
- /* stop the periodic callout */
- bnx2x_periodic_stop(sc);
-
/* mark driver as unloaded in shmem2 */
if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
@@ -4490,6 +4484,8 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
struct bnx2x_softc *sc = fp->sc;
uint8_t more_rx = FALSE;
+ PMD_DRV_LOG(DEBUG, "---> FP TASK QUEUE (%d) <--", fp->index);
+
/* update the fastpath index */
bnx2x_update_fp_sb_idx(fp);
@@ -4506,7 +4502,7 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
}
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
- le16toh(fp->fp_hc_idx), IGU_INT_DISABLE, 1);
+ le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}
/*
@@ -6997,16 +6993,6 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc)
}
}
-static void bnx2x_periodic_start(struct bnx2x_softc *sc)
-{
- atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
-}
-
-static void bnx2x_periodic_stop(struct bnx2x_softc *sc)
-{
- atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
-}
-
static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
{
int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc);
@@ -7041,10 +7027,6 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
bnx2x_link_report(sc);
}
- if (!CHIP_REV_IS_SLOW(sc)) {
- bnx2x_periodic_start(sc);
- }
-
sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
return rc;
}
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 4150fd85..0f6024fb 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -1,4 +1,4 @@
-/*-
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
@@ -9,8 +9,6 @@
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef __BNX2X_H__
@@ -1930,6 +1928,7 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc);
int bnx2x_complete_sp(struct bnx2x_softc *sc);
int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc);
void bnx2x_periodic_callout(struct bnx2x_softc *sc);
+void bnx2x_periodic_stop(void *param);
int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count);
void bnx2x_vf_close(struct bnx2x_softc *sc);
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 6a9cd581..575271a8 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#include "bnx2x.h"
@@ -13,6 +10,7 @@
#include <rte_dev.h>
#include <rte_ethdev_pci.h>
+#include <rte_alarm.h>
int bnx2x_logtype_init;
int bnx2x_logtype_driver;
@@ -81,26 +79,31 @@ static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
};
-static void
+static int
bnx2x_link_update(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
+ struct rte_eth_link link;
PMD_INIT_FUNC_TRACE();
+
bnx2x_link_status_update(sc);
+ memset(&link, 0, sizeof(link));
mb();
- dev->data->dev_link.link_speed = sc->link_vars.line_speed;
+ link.link_speed = sc->link_vars.line_speed;
switch (sc->link_vars.duplex) {
case DUPLEX_FULL:
- dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
break;
case DUPLEX_HALF:
- dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
break;
}
- dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
- dev->data->dev_link.link_status = sc->link_vars.link_up;
+ link.link_status = sc->link_vars.link_up;
+
+ return rte_eth_linkstatus_set(dev, &link);
}
static void
@@ -109,8 +112,6 @@ bnx2x_interrupt_action(struct rte_eth_dev *dev)
struct bnx2x_softc *sc = dev->data->dev_private;
uint32_t link_status;
- PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
-
bnx2x_intr_legacy(sc, 0);
if (sc->periodic_flags & PERIODIC_GO)
@@ -128,10 +129,41 @@ bnx2x_interrupt_handler(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct bnx2x_softc *sc = dev->data->dev_private;
+ PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
+
bnx2x_interrupt_action(dev);
rte_intr_enable(&sc->pci_dev->intr_handle);
}
+static void bnx2x_periodic_start(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ int ret = 0;
+
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
+ bnx2x_interrupt_action(dev);
+ if (IS_PF(sc)) {
+ ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
+ bnx2x_periodic_start, (void *)dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to start periodic"
+ " timer rc %d", ret);
+ assert(false && "Unable to start periodic timer");
+ }
+ }
+}
+
+void bnx2x_periodic_stop(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
+
+ rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);
+}
+
/*
* Devops - helper functions can be called from user application
*/
@@ -187,6 +219,10 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ /* start the periodic callout */
+ if (sc->periodic_flags & PERIODIC_STOP)
+ bnx2x_periodic_start(dev);
+
ret = bnx2x_init(sc);
if (ret) {
PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
@@ -227,6 +263,9 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
bnx2x_interrupt_handler, (void *)dev);
}
+ /* stop the periodic callout */
+ bnx2x_periodic_stop(dev);
+
ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
if (ret) {
PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
@@ -309,20 +348,16 @@ bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete
{
PMD_INIT_FUNC_TRACE();
- int old_link_status = dev->data->dev_link.link_status;
-
- bnx2x_link_update(dev);
-
- return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
+ return bnx2x_link_update(dev);
}
static int
bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
- int old_link_status = dev->data->dev_link.link_status;
struct bnx2x_softc *sc = dev->data->dev_private;
+ int ret = 0;
- bnx2x_link_update(dev);
+ ret = bnx2x_link_update(dev);
bnx2x_check_bull(sc);
if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
@@ -331,7 +366,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
- return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
+ return ret;
}
static int
@@ -585,6 +620,17 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
return ret;
}
+ /* schedule periodic poll for slowpath link events */
+ if (IS_PF(sc)) {
+ ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
+ bnx2x_periodic_start, (void *)eth_dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to start periodic"
+ " timer rc %d", ret);
+ return -EINVAL;
+ }
+ }
+
eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;
PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
@@ -599,18 +645,20 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
if (IS_VF(sc)) {
rte_spinlock_init(&sc->vf2pf_lock);
- if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
- &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
- RTE_CACHE_LINE_SIZE) != 0)
- return -ENOMEM;
+ ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
+ &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
+ RTE_CACHE_LINE_SIZE);
+ if (ret)
+ goto out;
sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
sc->vf2pf_mbox_mapping.vaddr;
- if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
- &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
- RTE_CACHE_LINE_SIZE) != 0)
- return -ENOMEM;
+ ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
+ &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
+ RTE_CACHE_LINE_SIZE);
+ if (ret)
+ goto out;
sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
sc->pf2vf_bulletin_mapping.vaddr;
@@ -618,10 +666,14 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
sc->max_rx_queues);
if (ret)
- return ret;
+ goto out;
}
return 0;
+
+out:
+ bnx2x_periodic_stop(eth_dev);
+ return ret;
}
static int
@@ -683,9 +735,7 @@ RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");
-RTE_INIT(bnx2x_init_log);
-static void
-bnx2x_init_log(void)
+RTE_INIT(bnx2x_init_log)
{
bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init");
if (bnx2x_logtype_init >= 0)
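
The bnx2x_ethdev.c hunks above replace the driver-managed periodic flag polling with an EAL alarm that re-arms itself: rte_eal_alarm_set() is one-shot, so bnx2x_periodic_start() schedules itself again from its own callback, and bnx2x_periodic_stop() cancels any pending instance. A self-contained sketch of the same pattern (callback names and the log type are illustrative):

#include <rte_alarm.h>
#include <rte_cycles.h>	/* US_PER_S */
#include <rte_log.h>

#define DEMO_POLL_PERIOD_US US_PER_S	/* 1 second, like BNX2X_SP_TIMER_PERIOD */

static void demo_periodic_poll(void *arg)
{
	/* ... slow-path work on 'arg' ... */

	/* The alarm fires once; re-arming from the callback makes it periodic. */
	if (rte_eal_alarm_set(DEMO_POLL_PERIOD_US, demo_periodic_poll, arg) != 0)
		RTE_LOG(ERR, PMD, "failed to re-arm periodic alarm\n");
}

static void demo_periodic_stop(void *arg)
{
	/* Cancels every pending (callback, arg) pair. */
	rte_eal_alarm_cancel(demo_periodic_poll, arg);
}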
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.h b/drivers/net/bnx2x/bnx2x_ethdev.h
index f05be7ee..807ba178 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.h
+++ b/drivers/net/bnx2x/bnx2x_ethdev.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef PMD_BNX2X_ETHDEV_H
@@ -58,7 +55,6 @@
#define wmb() rte_wmb()
#define rmb() rte_rmb()
-
#define MAX_QUEUES sysconf(_SC_NPROCESSORS_CONF)
#define BNX2X_MIN_RX_BUF_SIZE 1024
@@ -72,6 +68,8 @@
/* Maximum number of Rx packets to process at a time */
#define BNX2X_RX_BUDGET 0xffffffff
+#define BNX2X_SP_TIMER_PERIOD US_PER_S /* 1 second */
+
#endif
/* MAC address operations */
diff --git a/drivers/net/bnx2x/bnx2x_logs.h b/drivers/net/bnx2x/bnx2x_logs.h
index 69a2fe1d..9e232a9b 100644
--- a/drivers/net/bnx2x/bnx2x_logs.h
+++ b/drivers/net/bnx2x/bnx2x_logs.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef _PMD_LOGS_H_
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 331884cf..d9a4127d 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#include "bnx2x.h"
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h
index 94b9e1b6..6ad4928c 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.h
+++ b/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef _BNX2X_RXTX_H_
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index e3880abe..edc86ccc 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1,5 +1,5 @@
-/*-
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -9,8 +9,6 @@
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#include "bnx2x.h"
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 6fcaf607..635412bd 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -1,5 +1,5 @@
-/*-
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -9,8 +9,6 @@
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef BNX2X_STATS_H
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c
index dacad771..50099d46 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/bnx2x/bnx2x_vfpf.c
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#include "bnx2x.h"
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.h b/drivers/net/bnx2x/bnx2x_vfpf.h
index c4675d4c..cc6fef95 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/bnx2x/bnx2x_vfpf.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef BNX2X_VFPF_H
diff --git a/drivers/net/bnx2x/ecore_fw_defs.h b/drivers/net/bnx2x/ecore_fw_defs.h
index d10dd108..5984acd9 100644
--- a/drivers/net/bnx2x/ecore_fw_defs.h
+++ b/drivers/net/bnx2x/ecore_fw_defs.h
@@ -1,5 +1,5 @@
-/*-
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -8,8 +8,6 @@
* Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef ECORE_FW_DEFS_H
diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h
index 0220e5f9..57085ebb 100644
--- a/drivers/net/bnx2x/ecore_hsi.h
+++ b/drivers/net/bnx2x/ecore_hsi.h
@@ -1,5 +1,5 @@
-/*-
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -8,8 +8,6 @@
* Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef ECORE_HSI_H
diff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h
index 8d00abb7..f2de07e5 100644
--- a/drivers/net/bnx2x/ecore_init.h
+++ b/drivers/net/bnx2x/ecore_init.h
@@ -1,5 +1,5 @@
-/*-
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -9,8 +9,6 @@
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef ECORE_INIT_H
diff --git a/drivers/net/bnx2x/ecore_init_ops.h b/drivers/net/bnx2x/ecore_init_ops.h
index dd5df3d5..2b003afb 100644
--- a/drivers/net/bnx2x/ecore_init_ops.h
+++ b/drivers/net/bnx2x/ecore_init_ops.h
@@ -1,5 +1,5 @@
-/*-
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -9,8 +9,6 @@
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef ECORE_INIT_OPS_H
diff --git a/drivers/net/bnx2x/ecore_mfw_req.h b/drivers/net/bnx2x/ecore_mfw_req.h
index c798c74c..fe945048 100644
--- a/drivers/net/bnx2x/ecore_mfw_req.h
+++ b/drivers/net/bnx2x/ecore_mfw_req.h
@@ -1,5 +1,5 @@
-/*-
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -8,8 +8,6 @@
* Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef ECORE_MFW_REQ_H
diff --git a/drivers/net/bnx2x/ecore_reg.h b/drivers/net/bnx2x/ecore_reg.h
index 9800bafc..ae8a93bb 100644
--- a/drivers/net/bnx2x/ecore_reg.h
+++ b/drivers/net/bnx2x/ecore_reg.h
@@ -1,5 +1,5 @@
-/*-
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -8,8 +8,6 @@
* Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef ECORE_REG_H
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index 75329672..0c8685c6 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -1,5 +1,5 @@
-/*-
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -9,8 +9,6 @@
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#include "bnx2x.h"
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index 772c8b1b..6b65a496 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -1,5 +1,5 @@
-/*-
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -9,8 +9,6 @@
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef ECORE_SP_H
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index 34a29373..b63fd23e 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -1,5 +1,5 @@
-/*
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -9,8 +9,6 @@
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#include "bnx2x.h"
diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h
index 236f9367..40000c24 100644
--- a/drivers/net/bnx2x/elink.h
+++ b/drivers/net/bnx2x/elink.h
@@ -1,5 +1,5 @@
-/*
- * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
@@ -9,8 +9,6 @@
* Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
*/
#ifndef ELINK_H
diff --git a/drivers/net/bnx2x/meson.build b/drivers/net/bnx2x/meson.build
new file mode 100644
index 00000000..e3c68886
--- /dev/null
+++ b/drivers/net/bnx2x/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+dep = cc.find_library('z', required: false)
+build = dep.found()
+ext_deps += dep
+cflags += '-DZLIB_CONST'
+sources = files('bnx2x.c',
+ 'bnx2x_ethdev.c',
+ 'bnx2x_rxtx.c',
+ 'bnx2x_stats.c',
+ 'bnx2x_vfpf.c',
+ 'ecore_sp.c',
+ 'elink.c')
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
index fd0cb523..8be3cb0e 100644
--- a/drivers/net/bnxt/Makefile
+++ b/drivers/net/bnxt/Makefile
@@ -29,6 +29,7 @@ EXPORT_MAP := rte_pmd_bnxt_version.map
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_cpr.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_hwrm.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ring.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxq.c
@@ -38,6 +39,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_util.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += rte_pmd_bnxt.c
#
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index afaaf8c4..db5c4eb0 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -22,8 +22,24 @@
#define BNXT_MAX_MTU 9500
#define VLAN_TAG_SIZE 4
+#define BNXT_VF_RSV_NUM_RSS_CTX 1
+#define BNXT_VF_RSV_NUM_L2_CTX 4
+/* TODO: For now, do not support VMDq/RFS on VFs. */
+#define BNXT_VF_RSV_NUM_VNIC 1
#define BNXT_MAX_LED 4
#define BNXT_NUM_VLANS 2
+#define BNXT_MIN_RING_DESC 16
+#define BNXT_MAX_TX_RING_DESC 4096
+#define BNXT_MAX_RX_RING_DESC 8192
+#define BNXT_DB_SIZE 0x80
+
+#define BNXT_INT_LAT_TMR_MIN 75
+#define BNXT_INT_LAT_TMR_MAX 150
+#define BNXT_NUM_CMPL_AGGR_INT 36
+#define BNXT_CMPL_AGGR_DMA_TMR 37
+#define BNXT_NUM_CMPL_DMA_AGGR 36
+#define BNXT_CMPL_AGGR_DMA_TMR_DURING_INT 50
+#define BNXT_NUM_CMPL_DMA_AGGR_DURING_INT 12
struct bnxt_led_info {
uint8_t led_id;
@@ -98,6 +114,7 @@ struct bnxt_child_vf_info {
struct bnxt_pf_info {
#define BNXT_FIRST_PF_FID 1
#define BNXT_MAX_VFS(bp) (bp->pf.max_vfs)
+#define BNXT_TOTAL_VFS(bp) ((bp)->pf.total_vfs)
#define BNXT_FIRST_VF_FID 128
#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp)
#define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp))
@@ -105,6 +122,9 @@ struct bnxt_pf_info {
uint16_t first_vf_id;
uint16_t active_vfs;
uint16_t max_vfs;
+ uint16_t total_vfs; /* Total VFs possible.
+ * Not necessarily enabled.
+ */
uint32_t func_cfg_flags;
void *vf_req_buf;
rte_iova_t vf_req_buf_dma_addr;
@@ -202,6 +222,16 @@ struct bnxt_ptp_cfg {
uint32_t tx_mapped_regs[BNXT_PTP_TX_REGS];
};
+struct bnxt_coal {
+ uint16_t num_cmpl_aggr_int;
+ uint16_t num_cmpl_dma_aggr;
+ uint16_t num_cmpl_dma_aggr_during_int;
+ uint16_t int_lat_tmr_max;
+ uint16_t int_lat_tmr_min;
+ uint16_t cmpl_aggr_dma_tmr;
+ uint16_t cmpl_aggr_dma_tmr_during_int;
+};
+
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
struct bnxt {
void *bar0;
@@ -302,12 +332,14 @@ struct bnxt {
struct bnxt_led_info leds[BNXT_MAX_LED];
uint8_t num_leds;
struct bnxt_ptp_cfg *ptp_cfg;
+ uint16_t vf_resv_strategy;
};
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
bool is_bnxt_supported(struct rte_eth_dev *dev);
+bool bnxt_stratus_device(struct bnxt *bp);
extern const struct rte_flow_ops bnxt_flow_ops;
extern int bnxt_logtype_driver;
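
The new bnxt_coal structure gathers the interrupt-coalescing parameters whose default values are defined near the top of this header. A hedged sketch of loading that default profile (the helper name is hypothetical; the driver programs these values into firmware via HWRM elsewhere):

/* Hypothetical helper: fill bnxt_coal with the header's default profile. */
static void demo_coal_set_defaults(struct bnxt_coal *coal)
{
	coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
	coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
	coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
	coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
	coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
	coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
	coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}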
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index 6c1e6d2b..c7af5698 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -22,12 +22,20 @@
#define ADV_RAW_CMP(idx, n) ((idx) + (n))
#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
#define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask)
+#define RING_CMPL(ring_mask, idx) ((idx) & (ring_mask))
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
#define FLIP_VALID(cons, mask, val) ((cons) >= (mask) ? !(val) : (val))
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+#define NEXT_CMPL(cpr, idx, v, inc) do { \
+ (idx) += (inc); \
+ if (unlikely((idx) == (cpr)->cp_ring_struct->ring_size)) { \
+ (v) = !(v); \
+ (idx) = 0; \
+ } \
+} while (0)
#define B_CP_DB_REARM(cpr, raw_cons) \
rte_write32((DB_CP_REARM_FLAGS | \
RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
@@ -50,6 +58,10 @@
rte_write32((DB_CP_FLAGS | \
RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
((cpr)->cp_doorbell))
+#define B_CP_DB(cpr, raw_cons, ring_mask) \
+ rte_write32((DB_CP_FLAGS | \
+ RING_CMPL((ring_mask), raw_cons)), \
+ ((cpr)->cp_doorbell))
struct bnxt_ring;
struct bnxt_cp_ring_info {
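
The NEXT_CMPL() macro added above advances a raw completion-ring index and flips the expected phase bit whenever the index wraps, so a poll loop can distinguish software-owned entries from stale ones without reading a producer index. An illustrative poll loop built on it, assuming the driver's bnxt headers (everything except NEXT_CMPL is an assumption, including the cpr phase field and the CMPL_BASE_V test):

static int demo_poll_cmpl_ring(struct bnxt_cp_ring_info *cpr, int budget)
{
	uint32_t cons = cpr->cp_raw_cons;
	bool valid = cpr->valid;	/* assumed phase-tracking field */
	int handled = 0;

	while (budget--) {
		struct cmpl_base *cmp = &cpr->cp_desc_ring[cons];

		/* The entry is software-owned only while its V bit matches
		 * the phase expected for this pass over the ring. */
		if (!!(rte_le_to_cpu_32(cmp->info3_v) & CMPL_BASE_V) != valid)
			break;
		/* ... handle the completion ... */
		handled++;
		NEXT_CMPL(cpr, cons, valid, 1);
	}
	cpr->cp_raw_cons = cons;
	cpr->valid = valid;
	return handled;
}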
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 6e56bfd3..cc7e4391 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -26,6 +26,7 @@
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
+#include "bnxt_util.h"
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
@@ -73,6 +74,7 @@ int bnxt_logtype_driver;
#define BROADCOM_DEV_ID_58802 0xd802
#define BROADCOM_DEV_ID_58804 0xd804
#define BROADCOM_DEV_ID_58808 0x16f0
+#define BROADCOM_DEV_ID_58802_VF 0xd800
static const struct rte_pci_id bnxt_pci_id_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
@@ -116,6 +118,7 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -147,11 +150,13 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_CRC_STRIP | \
+ DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_TCP_LRO)
static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
+static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
/***********************/
@@ -195,13 +200,14 @@ alloc_mem_err:
static int bnxt_init_chip(struct bnxt *bp)
{
- unsigned int i;
+ struct bnxt_rx_queue *rxq;
struct rte_eth_link new;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
uint32_t queue_id, base = BNXT_MISC_VEC_ID;
uint32_t vec = BNXT_MISC_VEC_ID;
+ unsigned int i, j;
int rc;
/* disable uio/vfio intr/eventfd mapping */
@@ -243,7 +249,19 @@ static int bnxt_init_chip(struct bnxt *bp)
/* VNIC configuration */
for (i = 0; i < bp->nr_vnics; i++) {
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
+
+ vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
+ if (!vnic->fw_grp_ids) {
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc %d bytes for group ids\n",
+ size);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ memset(vnic->fw_grp_ids, -1, size);
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
@@ -252,12 +270,15 @@ static int bnxt_init_chip(struct bnxt *bp)
goto err_out;
}
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
- if (rc) {
- PMD_DRV_LOG(ERR,
- "HWRM vnic %d ctx alloc failure rc: %x\n",
- i, rc);
- goto err_out;
+ /* Alloc RSS context only if RSS mode is enabled */
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM vnic %d ctx alloc failure rc: %x\n",
+ i, rc);
+ goto err_out;
+ }
}
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
@@ -275,6 +296,13 @@ static int bnxt_init_chip(struct bnxt *bp)
goto err_out;
}
+ for (j = 0; j < bp->rx_nr_rings; j++) {
+ rxq = bp->eth_dev->data->rx_queues[j];
+
+ if (rxq->rx_deferred_start)
+ rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+ }
+
rc = bnxt_vnic_rss_configure(bp, vnic);
if (rc) {
PMD_DRV_LOG(ERR,
@@ -410,7 +438,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
/* For the sake of symmetry, max_rx_queues = max_tx_queues */
dev_info->max_rx_queues = max_rx_rings;
dev_info->max_tx_queues = max_rx_rings;
- dev_info->reta_size = bp->max_rsscos_ctx;
+ dev_info->reta_size = HW_HASH_INDEX_SIZE;
dev_info->hash_key_size = 40;
max_vnics = bp->max_vnics;
@@ -449,6 +477,10 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
eth_dev->data->dev_conf.intr_conf.lsc = 1;
eth_dev->data->dev_conf.intr_conf.rxq = 1;
+ dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
+ dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
+ dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
+ dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
/* *INDENT-ON* */
@@ -489,6 +521,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+ int rc;
bp->rx_queues = (void *)eth_dev->data->rx_queues;
bp->tx_queues = (void *)eth_dev->data->tx_queues;
@@ -496,19 +529,23 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
- int rc;
+ rc = bnxt_hwrm_check_vf_rings(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
+ return -ENOSPC;
+ }
- rc = bnxt_hwrm_func_reserve_vf_resc(bp);
+ rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
if (rc) {
PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
return -ENOSPC;
}
-
+ } else {
/* legacy driver needs to get updated values */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
- return -ENOSPC;
+ return rc;
}
}
@@ -519,7 +556,9 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
bp->max_cp_rings ||
eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_stat_ctx ||
- (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) {
+ (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps ||
+ (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+ bp->max_vnics < eth_dev->data->nb_rx_queues)) {
PMD_DRV_LOG(ERR,
"Insufficient resources to support requested config\n");
PMD_DRV_LOG(ERR,
@@ -527,9 +566,9 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
eth_dev->data->nb_tx_queues,
eth_dev->data->nb_rx_queues);
PMD_DRV_LOG(ERR,
- "Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n",
+ "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
- bp->max_stat_ctx, bp->max_ring_grps);
+ bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
return -ENOSPC;
}
@@ -664,6 +703,8 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
rte_free(bp->grp_info);
bp->grp_info = NULL;
}
+
+ bnxt_dev_uninit(eth_dev);
}
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
@@ -1287,9 +1328,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
struct bnxt_vnic_info *vnic;
unsigned int i;
int rc = 0;
- uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
- HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
- uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
+ uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
/* Cycle through all VNICs */
for (i = 0; i < bp->nr_vnics; i++) {
@@ -1336,8 +1377,8 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
memcpy(new_filter->l2_addr, filter->l2_addr,
ETHER_ADDR_LEN);
/* MAC + VLAN ID filter */
- new_filter->l2_ovlan = vlan_id;
- new_filter->l2_ovlan_mask = 0xF000;
+ new_filter->l2_ivlan = vlan_id;
+ new_filter->l2_ivlan_mask = 0xF000;
new_filter->enables |= en;
rc = bnxt_hwrm_set_l2_filter(bp,
vnic->fw_vnic_id,
@@ -1563,6 +1604,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint16_t size = 0;
vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
@@ -1570,9 +1612,14 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
if (rc)
break;
- rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
- if (rc)
- return rc;
+ size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+ size -= RTE_PKTMBUF_HEADROOM;
+
+ if (size < new_mtu) {
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc)
+ return rc;
+ }
}
return rc;
@@ -3061,6 +3108,18 @@ static bool bnxt_vf_pciid(uint16_t id)
id == BROADCOM_DEV_ID_5741X_VF ||
id == BROADCOM_DEV_ID_57414_VF ||
id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
+ id == BROADCOM_DEV_ID_58802_VF)
+ return true;
+ return false;
+}
+
+bool bnxt_stratus_device(struct bnxt *bp)
+{
+ uint16_t id = bp->pdev->id.device_id;
+
+ if (id == BROADCOM_DEV_ID_STRATUS_NIC ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
return true;
return false;
@@ -3112,7 +3171,6 @@ init_err_disable:
return rc;
}
-static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
#define ALLOW_FUNC(x) \
{ \
@@ -3404,13 +3462,15 @@ error:
}
static int
-bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
+bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
+{
struct bnxt *bp = eth_dev->data->dev_private;
int rc;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -EPERM;
+ PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
bnxt_disable_int(bp);
bnxt_free_int(bp);
bnxt_free_mem(bp);
@@ -3424,8 +3484,17 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
}
rc = bnxt_hwrm_func_driver_unregister(bp, 0);
bnxt_free_hwrm_resources(bp);
- rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
- rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
+
+ if (bp->tx_mem_zone) {
+ rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
+ bp->tx_mem_zone = NULL;
+ }
+
+ if (bp->rx_mem_zone) {
+ rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
+ bp->rx_mem_zone = NULL;
+ }
+
if (bp->dev_stopped == 0)
bnxt_dev_close_op(eth_dev);
if (bp->pf.vf_info)
@@ -3471,9 +3540,7 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &bnxt_rte_pmd);
}
-RTE_INIT(bnxt_init_log);
-static void
-bnxt_init_log(void)
+RTE_INIT(bnxt_init_log)
{
bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
if (bnxt_logtype_driver >= 0)
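
Both PMDs in this patch also convert their log registration to the one-line RTE_INIT() constructor form, dropping the separate prototype plus static function. A standalone sketch of the new idiom (the logtype name is illustrative):

#include <rte_common.h>
#include <rte_log.h>

static int demo_pmd_logtype;

/* RTE_INIT() now declares and defines the constructor in one step; the
 * old style needed 'RTE_INIT(fn);' followed by a separate definition. */
RTE_INIT(demo_pmd_init_log)
{
	demo_pmd_logtype = rte_log_register("pmd.net.demo");
	if (demo_pmd_logtype >= 0)
		rte_log_set_level(demo_pmd_logtype, RTE_LOG_NOTICE);
}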
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index e36da997..1038941e 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -117,16 +117,29 @@ void bnxt_free_filter_mem(struct bnxt *bp)
max_filters = bp->max_l2_ctx;
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
- if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
- PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
+ if (filter->fw_l2_filter_id != ((uint64_t)-1) &&
+ filter->filter_type == HWRM_CFA_L2_FILTER) {
+ PMD_DRV_LOG(ERR, "L2 filter is not free\n");
/* Call HWRM to try to free filter again */
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
PMD_DRV_LOG(ERR,
- "HWRM filter cannot be freed rc = %d\n",
- rc);
+ "Cannot free L2 filter: %d\n",
+ rc);
}
filter->fw_l2_filter_id = UINT64_MAX;
+
+ if (filter->fw_ntuple_filter_id != ((uint64_t)-1) &&
+ filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
+ PMD_DRV_LOG(ERR, "NTUPLE filter is not free\n");
+ /* Call HWRM to try to free filter again */
+ rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ if (rc)
+ PMD_DRV_LOG(ERR,
+ "Cannot free NTUPLE filter: %d\n",
+ rc);
+ }
+ filter->fw_ntuple_filter_id = UINT64_MAX;
}
STAILQ_INIT(&bp->free_filter_list);
@@ -180,1072 +193,3 @@ void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
-
-static int
-bnxt_flow_agrs_validate(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- if (!pattern) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM_NUM,
- NULL, "NULL pattern.");
- return -rte_errno;
- }
-
- if (!actions) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_NUM,
- NULL, "NULL action.");
- return -rte_errno;
- }
-
- if (!attr) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR,
- NULL, "NULL attribute.");
- return -rte_errno;
- }
-
- return 0;
-}
-
-static const struct rte_flow_item *
-nxt_non_void_pattern(const struct rte_flow_item *cur)
-{
- while (1) {
- if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
- return cur;
- cur++;
- }
-}
-
-static const struct rte_flow_action *
-nxt_non_void_action(const struct rte_flow_action *cur)
-{
- while (1) {
- if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
- return cur;
- cur++;
- }
-}
-
-int bnxt_check_zero_bytes(const uint8_t *bytes, int len)
-{
- int i;
- for (i = 0; i < len; i++)
- if (bytes[i] != 0x00)
- return 0;
- return 1;
-}
-
-static int
-bnxt_filter_type_check(const struct rte_flow_item pattern[],
- struct rte_flow_error *error __rte_unused)
-{
- const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
- int use_ntuple = 1;
-
- while (item->type != RTE_FLOW_ITEM_TYPE_END) {
- switch (item->type) {
- case RTE_FLOW_ITEM_TYPE_ETH:
- use_ntuple = 1;
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- use_ntuple = 0;
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- case RTE_FLOW_ITEM_TYPE_IPV6:
- case RTE_FLOW_ITEM_TYPE_TCP:
- case RTE_FLOW_ITEM_TYPE_UDP:
- /* FALLTHROUGH */
- /* need ntuple match, reset exact match */
- if (!use_ntuple) {
- PMD_DRV_LOG(ERR,
- "VLAN flow cannot use NTUPLE filter\n");
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Cannot use VLAN with NTUPLE");
- return -rte_errno;
- }
- use_ntuple |= 1;
- break;
- default:
- PMD_DRV_LOG(ERR, "Unknown Flow type");
- use_ntuple |= 1;
- }
- item++;
- }
- return use_ntuple;
-}
-
-static int
-bnxt_validate_and_parse_flow_type(struct bnxt *bp,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- struct rte_flow_error *error,
- struct bnxt_filter_info *filter)
-{
- const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
- const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
- const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
- const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
- const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
- const struct rte_flow_item_udp *udp_spec, *udp_mask;
- const struct rte_flow_item_eth *eth_spec, *eth_mask;
- const struct rte_flow_item_nvgre *nvgre_spec;
- const struct rte_flow_item_nvgre *nvgre_mask;
- const struct rte_flow_item_vxlan *vxlan_spec;
- const struct rte_flow_item_vxlan *vxlan_mask;
- uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
- uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
- const struct rte_flow_item_vf *vf_spec;
- uint32_t tenant_id_be = 0;
- bool vni_masked = 0;
- bool tni_masked = 0;
- uint32_t vf = 0;
- int use_ntuple;
- uint32_t en = 0;
- uint32_t en_ethertype;
- int dflt_vnic;
-
- use_ntuple = bnxt_filter_type_check(pattern, error);
- PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
- if (use_ntuple < 0)
- return use_ntuple;
-
- filter->filter_type = use_ntuple ?
- HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
- en_ethertype = use_ntuple ?
- NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
- EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
-
- while (item->type != RTE_FLOW_ITEM_TYPE_END) {
- if (item->last) {
- /* last or range is NOT supported as match criteria */
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "No support for range");
- return -rte_errno;
- }
- if (!item->spec || !item->mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "spec/mask is NULL");
- return -rte_errno;
- }
- switch (item->type) {
- case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = item->spec;
- eth_mask = item->mask;
-
- /* Source MAC address mask cannot be partially set.
- * Should be All 0's or all 1's.
- * Destination MAC address mask must not be partially
- * set. Should be all 1's or all 0's.
- */
- if ((!is_zero_ether_addr(&eth_mask->src) &&
- !is_broadcast_ether_addr(&eth_mask->src)) ||
- (!is_zero_ether_addr(&eth_mask->dst) &&
- !is_broadcast_ether_addr(&eth_mask->dst))) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "MAC_addr mask not valid");
- return -rte_errno;
- }
-
- /* Mask is not allowed. Only exact matches are */
- if (eth_mask->type &&
- eth_mask->type != RTE_BE16(0xffff)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "ethertype mask not valid");
- return -rte_errno;
- }
-
- if (is_broadcast_ether_addr(&eth_mask->dst)) {
- rte_memcpy(filter->dst_macaddr,
- &eth_spec->dst, 6);
- en |= use_ntuple ?
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
- EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
- }
- if (is_broadcast_ether_addr(&eth_mask->src)) {
- rte_memcpy(filter->src_macaddr,
- &eth_spec->src, 6);
- en |= use_ntuple ?
- NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
- EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
- } /*
- * else {
- * RTE_LOG(ERR, PMD, "Handle this condition\n");
- * }
- */
- if (eth_mask->type) {
- filter->ethertype =
- rte_be_to_cpu_16(eth_spec->type);
- en |= en_ethertype;
- }
-
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec = item->spec;
- vlan_mask = item->mask;
- if (en & en_ethertype) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VLAN TPID matching is not"
- " supported");
- return -rte_errno;
- }
- if (vlan_mask->tci &&
- vlan_mask->tci == RTE_BE16(0x0fff)) {
- /* Only the VLAN ID can be matched. */
- filter->l2_ovlan =
- rte_be_to_cpu_16(vlan_spec->tci &
- RTE_BE16(0x0fff));
- en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
- } else if (vlan_mask->tci) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VLAN mask is invalid");
- return -rte_errno;
- }
- if (vlan_mask->inner_type &&
- vlan_mask->inner_type != RTE_BE16(0xffff)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "inner ethertype mask not"
- " valid");
- return -rte_errno;
- }
- if (vlan_mask->inner_type) {
- filter->ethertype =
- rte_be_to_cpu_16(vlan_spec->inner_type);
- en |= en_ethertype;
- }
-
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- /* If mask is not involved, we could use EM filters. */
- ipv4_spec = item->spec;
- ipv4_mask = item->mask;
- /* Only IP DST and SRC fields are maskable. */
- if (ipv4_mask->hdr.version_ihl ||
- ipv4_mask->hdr.type_of_service ||
- ipv4_mask->hdr.total_length ||
- ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.fragment_offset ||
- ipv4_mask->hdr.time_to_live ||
- ipv4_mask->hdr.next_proto_id ||
- ipv4_mask->hdr.hdr_checksum) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid IPv4 mask.");
- return -rte_errno;
- }
- filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
- filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
- if (use_ntuple)
- en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
- else
- en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
- EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
- if (ipv4_mask->hdr.src_addr) {
- filter->src_ipaddr_mask[0] =
- ipv4_mask->hdr.src_addr;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
- }
- if (ipv4_mask->hdr.dst_addr) {
- filter->dst_ipaddr_mask[0] =
- ipv4_mask->hdr.dst_addr;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
- }
- filter->ip_addr_type = use_ntuple ?
- HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
- HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
- if (ipv4_spec->hdr.next_proto_id) {
- filter->ip_protocol =
- ipv4_spec->hdr.next_proto_id;
- if (use_ntuple)
- en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
- else
- en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- ipv6_spec = item->spec;
- ipv6_mask = item->mask;
-
- /* Only IP DST and SRC fields are maskable. */
- if (ipv6_mask->hdr.vtc_flow ||
- ipv6_mask->hdr.payload_len ||
- ipv6_mask->hdr.proto ||
- ipv6_mask->hdr.hop_limits) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid IPv6 mask.");
- return -rte_errno;
- }
-
- if (use_ntuple)
- en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
- else
- en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
- EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
- rte_memcpy(filter->src_ipaddr,
- ipv6_spec->hdr.src_addr, 16);
- rte_memcpy(filter->dst_ipaddr,
- ipv6_spec->hdr.dst_addr, 16);
- if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
- 16)) {
- rte_memcpy(filter->src_ipaddr_mask,
- ipv6_mask->hdr.src_addr, 16);
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
- }
- if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
- 16)) {
- rte_memcpy(filter->dst_ipaddr_mask,
- ipv6_mask->hdr.dst_addr, 16);
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
- }
- filter->ip_addr_type = use_ntuple ?
- NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
- EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- tcp_spec = item->spec;
- tcp_mask = item->mask;
-
- /* Check TCP mask. Only DST & SRC ports are maskable */
- if (tcp_mask->hdr.sent_seq ||
- tcp_mask->hdr.recv_ack ||
- tcp_mask->hdr.data_off ||
- tcp_mask->hdr.tcp_flags ||
- tcp_mask->hdr.rx_win ||
- tcp_mask->hdr.cksum ||
- tcp_mask->hdr.tcp_urp) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid TCP mask");
- return -rte_errno;
- }
- filter->src_port = tcp_spec->hdr.src_port;
- filter->dst_port = tcp_spec->hdr.dst_port;
- if (use_ntuple)
- en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
- else
- en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
- EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
- if (tcp_mask->hdr.dst_port) {
- filter->dst_port_mask = tcp_mask->hdr.dst_port;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
- }
- if (tcp_mask->hdr.src_port) {
- filter->src_port_mask = tcp_mask->hdr.src_port;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- udp_spec = item->spec;
- udp_mask = item->mask;
-
- if (udp_mask->hdr.dgram_len ||
- udp_mask->hdr.dgram_cksum) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid UDP mask");
- return -rte_errno;
- }
-
- filter->src_port = udp_spec->hdr.src_port;
- filter->dst_port = udp_spec->hdr.dst_port;
- if (use_ntuple)
- en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
- else
- en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
- EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
-
- if (udp_mask->hdr.dst_port) {
- filter->dst_port_mask = udp_mask->hdr.dst_port;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
- }
- if (udp_mask->hdr.src_port) {
- filter->src_port_mask = udp_mask->hdr.src_port;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- vxlan_spec = item->spec;
- vxlan_mask = item->mask;
- /* Check if VXLAN item is used to describe protocol.
- * If yes, both spec and mask should be NULL.
- * If no, both spec and mask shouldn't be NULL.
- */
- if ((!vxlan_spec && vxlan_mask) ||
- (vxlan_spec && !vxlan_mask)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid VXLAN item");
- return -rte_errno;
- }
-
- if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
- vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
- vxlan_spec->flags != 0x8) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid VXLAN item");
- return -rte_errno;
- }
-
- /* Check if VNI is masked. */
- if (vxlan_spec && vxlan_mask) {
- vni_masked =
- !!memcmp(vxlan_mask->vni, vni_mask,
- RTE_DIM(vni_mask));
- if (vni_masked) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid VNI mask");
- return -rte_errno;
- }
-
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->vni =
- rte_be_to_cpu_32(tenant_id_be);
- filter->tunnel_type =
- CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_NVGRE:
- nvgre_spec = item->spec;
- nvgre_mask = item->mask;
- /* Check if NVGRE item is used to describe protocol.
- * If yes, both spec and mask should be NULL.
- * If no, both spec and mask shouldn't be NULL.
- */
- if ((!nvgre_spec && nvgre_mask) ||
- (nvgre_spec && !nvgre_mask)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid NVGRE item");
- return -rte_errno;
- }
-
- if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
- nvgre_spec->protocol != 0x6558) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid NVGRE item");
- return -rte_errno;
- }
-
- if (nvgre_spec && nvgre_mask) {
- tni_masked =
- !!memcmp(nvgre_mask->tni, tni_mask,
- RTE_DIM(tni_mask));
- if (tni_masked) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid TNI mask");
- return -rte_errno;
- }
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- nvgre_spec->tni, 3);
- filter->vni =
- rte_be_to_cpu_32(tenant_id_be);
- filter->tunnel_type =
- CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_VF:
- vf_spec = item->spec;
- vf = vf_spec->id;
- if (!BNXT_PF(bp)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Configuring on a VF!");
- return -rte_errno;
- }
-
- if (vf >= bp->pdev->max_vfs) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Incorrect VF id!");
- return -rte_errno;
- }
-
- if (!attr->transfer) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Matching VF traffic without"
- " affecting it (transfer attribute)"
- " is unsupported");
- return -rte_errno;
- }
-
- filter->mirror_vnic_id =
- dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
- if (dflt_vnic < 0) {
- /* This simply indicates there's no driver
- * loaded. This is not an error.
- */
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Unable to get default VNIC for VF");
- return -rte_errno;
- }
- filter->mirror_vnic_id = dflt_vnic;
- en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
- break;
- default:
- break;
- }
- item++;
- }
- filter->enables = en;
-
- return 0;
-}
-
-/* Parse attributes */
-static int
-bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
-{
- /* Must be input direction */
- if (!attr->ingress) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- attr, "Only support ingress.");
- return -rte_errno;
- }
-
- /* Not supported */
- if (attr->egress) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- attr, "No support for egress.");
- return -rte_errno;
- }
-
- /* Not supported */
- if (attr->priority) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- attr, "No support for priority.");
- return -rte_errno;
- }
-
- /* Not supported */
- if (attr->group) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- attr, "No support for group.");
- return -rte_errno;
- }
-
- return 0;
-}
-
-struct bnxt_filter_info *
-bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
- struct bnxt_vnic_info *vnic)
-{
- struct bnxt_filter_info *filter1, *f0;
- struct bnxt_vnic_info *vnic0;
- int rc;
-
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- f0 = STAILQ_FIRST(&vnic0->filter);
-
- //This flow has same DST MAC as the port/l2 filter.
- if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
- return f0;
-
- //This flow needs DST MAC which is not same as port/l2
- PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
- filter1 = bnxt_get_unused_filter(bp);
- if (filter1 == NULL)
- return NULL;
- filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
- filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
- L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
- memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
- memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
- rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
- filter1);
- if (rc) {
- bnxt_free_filter(bp, filter1);
- return NULL;
- }
- return filter1;
-}
-
-static int
-bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error,
- struct bnxt_filter_info *filter)
-{
- const struct rte_flow_action *act = nxt_non_void_action(actions);
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- const struct rte_flow_action_queue *act_q;
- const struct rte_flow_action_vf *act_vf;
- struct bnxt_vnic_info *vnic, *vnic0;
- struct bnxt_filter_info *filter1;
- uint32_t vf = 0;
- int dflt_vnic;
- int rc;
-
- if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
- PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Cannot create flow on RSS queues");
- rc = -rte_errno;
- goto ret;
- }
-
- rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
- filter);
- if (rc != 0)
- goto ret;
-
- rc = bnxt_flow_parse_attr(attr, error);
- if (rc != 0)
- goto ret;
- //Since we support ingress attribute only - right now.
- if (filter->filter_type == HWRM_CFA_EM_FILTER)
- filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
-
- switch (act->type) {
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- /* Allow this flow. Redirect to a VNIC. */
- act_q = (const struct rte_flow_action_queue *)act->conf;
- if (act_q->index >= bp->rx_nr_rings) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
- "Invalid queue ID.");
- rc = -rte_errno;
- goto ret;
- }
- PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
-
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
- if (vnic == NULL) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
- "No matching VNIC for queue ID.");
- rc = -rte_errno;
- goto ret;
- }
- filter->dst_id = vnic->fw_vnic_id;
- filter1 = bnxt_get_l2_filter(bp, filter, vnic);
- if (filter1 == NULL) {
- rc = -ENOSPC;
- goto ret;
- }
- filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- PMD_DRV_LOG(DEBUG, "VNIC found\n");
- break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
- if (filter1 == NULL) {
- rc = -ENOSPC;
- goto ret;
- }
- filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- if (filter->filter_type == HWRM_CFA_EM_FILTER)
- filter->flags =
- HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
- else
- filter->flags =
- HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
- break;
- case RTE_FLOW_ACTION_TYPE_COUNT:
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
- if (filter1 == NULL) {
- rc = -ENOSPC;
- goto ret;
- }
- filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
- break;
- case RTE_FLOW_ACTION_TYPE_VF:
- act_vf = (const struct rte_flow_action_vf *)act->conf;
- vf = act_vf->id;
- if (!BNXT_PF(bp)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "Configuring on a VF!");
- rc = -rte_errno;
- goto ret;
- }
-
- if (vf >= bp->pdev->max_vfs) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "Incorrect VF id!");
- rc = -rte_errno;
- goto ret;
- }
-
- filter->mirror_vnic_id =
- dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
- if (dflt_vnic < 0) {
- /* This simply indicates there's no driver loaded.
- * This is not an error.
- */
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "Unable to get default VNIC for VF");
- rc = -rte_errno;
- goto ret;
- }
- filter->mirror_vnic_id = dflt_vnic;
- filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
-
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
- if (filter1 == NULL) {
- rc = -ENOSPC;
- goto ret;
- }
- filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- break;
-
- default:
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
- "Invalid action.");
- rc = -rte_errno;
- goto ret;
- }
-
- act = nxt_non_void_action(++act);
- if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Invalid action.");
- rc = -rte_errno;
- goto ret;
- }
-ret:
- return rc;
-}
-
-static int
-bnxt_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- struct bnxt_filter_info *filter;
- int ret = 0;
-
- ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
- if (ret != 0)
- return ret;
-
- filter = bnxt_get_unused_filter(bp);
- if (filter == NULL) {
- PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
- return -ENOMEM;
- }
-
- ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
- error, filter);
- /* No need to hold on to this filter if we are just validating flow */
- filter->fw_l2_filter_id = UINT64_MAX;
- bnxt_free_filter(bp, filter);
-
- return ret;
-}
-
-static int
-bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
-{
- struct bnxt_filter_info *mf;
- struct rte_flow *flow;
- int i;
-
- for (i = bp->nr_vnics - 1; i >= 0; i--) {
- struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
-
- STAILQ_FOREACH(flow, &vnic->flow_list, next) {
- mf = flow->filter;
-
- if (mf->filter_type == nf->filter_type &&
- mf->flags == nf->flags &&
- mf->src_port == nf->src_port &&
- mf->src_port_mask == nf->src_port_mask &&
- mf->dst_port == nf->dst_port &&
- mf->dst_port_mask == nf->dst_port_mask &&
- mf->ip_protocol == nf->ip_protocol &&
- mf->ip_addr_type == nf->ip_addr_type &&
- mf->ethertype == nf->ethertype &&
- mf->vni == nf->vni &&
- mf->tunnel_type == nf->tunnel_type &&
- mf->l2_ovlan == nf->l2_ovlan &&
- mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
- mf->l2_ivlan == nf->l2_ivlan &&
- mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
- !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
- !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
- ETHER_ADDR_LEN) &&
- !memcmp(mf->src_macaddr, nf->src_macaddr,
- ETHER_ADDR_LEN) &&
- !memcmp(mf->dst_macaddr, nf->dst_macaddr,
- ETHER_ADDR_LEN) &&
- !memcmp(mf->src_ipaddr, nf->src_ipaddr,
- sizeof(nf->src_ipaddr)) &&
- !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
- sizeof(nf->src_ipaddr_mask)) &&
- !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
- sizeof(nf->dst_ipaddr)) &&
- !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
- sizeof(nf->dst_ipaddr_mask))) {
- if (mf->dst_id == nf->dst_id)
- return -EEXIST;
- /* Same Flow, Different queue
- * Clear the old ntuple filter
- */
- if (nf->filter_type == HWRM_CFA_EM_FILTER)
- bnxt_hwrm_clear_em_filter(bp, mf);
- if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
- bnxt_hwrm_clear_ntuple_filter(bp, mf);
- /* Free the old filter, update flow
- * with new filter
- */
- bnxt_free_filter(bp, mf);
- flow->filter = nf;
- return -EXDEV;
- }
- }
- }
- return 0;
-}
-
-static struct rte_flow *
-bnxt_flow_create(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- struct bnxt_filter_info *filter;
- struct bnxt_vnic_info *vnic = NULL;
- bool update_flow = false;
- struct rte_flow *flow;
- unsigned int i;
- int ret = 0;
-
- flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
- if (!flow) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to allocate memory");
- return flow;
- }
-
- ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "Not a validate flow.\n");
- goto free_flow;
- }
-
- filter = bnxt_get_unused_filter(bp);
- if (filter == NULL) {
- PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
- goto free_flow;
- }
-
- ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
- error, filter);
- if (ret != 0)
- goto free_filter;
-
- ret = bnxt_match_filter(bp, filter);
- if (ret == -EEXIST) {
- PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
- /* Clear the filter that was created as part of
- * validate_and_parse_flow() above
- */
- bnxt_hwrm_clear_l2_filter(bp, filter);
- goto free_filter;
- } else if (ret == -EXDEV) {
- PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
- PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
- update_flow = true;
- }
-
- if (filter->filter_type == HWRM_CFA_EM_FILTER) {
- filter->enables |=
- HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
- ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
- }
- if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
- filter->enables |=
- HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
- ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
- }
-
- for (i = 0; i < bp->nr_vnics; i++) {
- vnic = &bp->vnic_info[i];
- if (filter->dst_id == vnic->fw_vnic_id)
- break;
- }
-
- if (!ret) {
- flow->filter = filter;
- flow->vnic = vnic;
- if (update_flow) {
- ret = -EXDEV;
- goto free_flow;
- }
- PMD_DRV_LOG(ERR, "Successfully created flow.\n");
- STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
- return flow;
- }
-free_filter:
- bnxt_free_filter(bp, filter);
-free_flow:
- if (ret == -EEXIST)
- rte_flow_error_set(error, ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Matching Flow exists.");
- else if (ret == -EXDEV)
- rte_flow_error_set(error, ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Flow with pattern exists, updating destination queue");
- else
- rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to create flow.");
- rte_free(flow);
- flow = NULL;
- return flow;
-}
-
-static int
-bnxt_flow_destroy(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- struct bnxt_filter_info *filter = flow->filter;
- struct bnxt_vnic_info *vnic = flow->vnic;
- int ret = 0;
-
- ret = bnxt_match_filter(bp, filter);
- if (ret == 0)
- PMD_DRV_LOG(ERR, "Could not find matching flow\n");
- if (filter->filter_type == HWRM_CFA_EM_FILTER)
- ret = bnxt_hwrm_clear_em_filter(bp, filter);
- if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
- ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
- else
- ret = bnxt_hwrm_clear_l2_filter(bp, filter);
- if (!ret) {
- STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
- rte_free(flow);
- } else {
- rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to destroy flow.");
- }
-
- return ret;
-}
-
-static int
-bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
-{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- struct bnxt_vnic_info *vnic;
- struct rte_flow *flow;
- unsigned int i;
- int ret = 0;
-
- for (i = 0; i < bp->nr_vnics; i++) {
- vnic = &bp->vnic_info[i];
- STAILQ_FOREACH(flow, &vnic->flow_list, next) {
- struct bnxt_filter_info *filter = flow->filter;
-
- if (filter->filter_type == HWRM_CFA_EM_FILTER)
- ret = bnxt_hwrm_clear_em_filter(bp, filter);
- if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
- ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-
- if (ret) {
- rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL,
- "Failed to flush flow in HW.");
- return -rte_errno;
- }
-
- STAILQ_REMOVE(&vnic->flow_list, flow,
- rte_flow, next);
- rte_free(flow);
- }
- }
-
- return ret;
-}
-
-const struct rte_flow_ops bnxt_flow_ops = {
- .validate = bnxt_flow_validate,
- .create = bnxt_flow_create,
- .destroy = bnxt_flow_destroy,
- .flush = bnxt_flow_flush,
-};
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index d27be703..a1ecfb19 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -69,7 +69,6 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp);
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp,
struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic);
-int bnxt_check_zero_bytes(const uint8_t *bytes, int len);
#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
new file mode 100644
index 00000000..ac765674
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -0,0 +1,1171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_vnic.h"
+#include "bnxt_util.h"
+#include "hsi_struct_def_dpdk.h"
+
+static int
+bnxt_flow_args_validate(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL,
+ "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL,
+ "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL,
+ "NULL attribute.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static const struct rte_flow_item *
+bnxt_flow_non_void_item(const struct rte_flow_item *cur)
+{
+ while (1) {
+ if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return cur;
+ cur++;
+ }
+}
+
+static const struct rte_flow_action *
+bnxt_flow_non_void_action(const struct rte_flow_action *cur)
+{
+ while (1) {
+ if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return cur;
+ cur++;
+ }
+}
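
Both helpers rely on the rte_flow convention that pattern and action arrays are terminated by an END entry, so the scan cannot run past the end of a well-formed array. A minimal sketch of the skip-VOID behaviour (array contents are illustrative):

    struct rte_flow_item items[] = {
        { .type = RTE_FLOW_ITEM_TYPE_VOID },
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    /* bnxt_flow_non_void_item(items)     -> &items[1]
     * bnxt_flow_non_void_item(&items[2]) -> &items[2] (END is returned as-is)
     */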
+
+static int
+bnxt_filter_type_check(const struct rte_flow_item pattern[],
+ struct rte_flow_error *error __rte_unused)
+{
+ const struct rte_flow_item *item =
+ bnxt_flow_non_void_item(pattern);
+ int use_ntuple = 1;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ use_ntuple = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ use_ntuple = 0;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* FALLTHROUGH */
+ /* need ntuple match, reset exact match */
+ if (!use_ntuple) {
+ PMD_DRV_LOG(ERR,
+ "VLAN flow cannot use NTUPLE filter\n");
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Cannot use VLAN with NTUPLE");
+ return -rte_errno;
+ }
+ use_ntuple |= 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown Flow type\n");
+ use_ntuple |= 1;
+ }
+ item++;
+ }
+ return use_ntuple;
+}
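
The return value doubles as the filter-type selector: a positive value selects an ntuple (HWRM_CFA_NTUPLE_FILTER) rule, zero selects an exact-match (HWRM_CFA_EM_FILTER) rule, and a negative errno rejects the pattern. Illustrative outcomes, assuming every item carries a valid spec and mask:

    /* ETH / IPV4 / TCP / END  -> 1          (ntuple filter)
     * ETH / VLAN / END        -> 0          (exact-match filter)
     * ETH / VLAN / IPV4 / END -> -rte_errno (VLAN cannot combine with ntuple)
     */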
+
+static int
+bnxt_validate_and_parse_flow_type(struct bnxt *bp,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct bnxt_filter_info *filter)
+{
+ const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
+ uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t tenant_id_be = 0;
+ bool vni_masked = 0;
+ bool tni_masked = 0;
+ uint32_t vf = 0;
+ int use_ntuple;
+ uint32_t en = 0;
+ uint32_t en_ethertype;
+ int dflt_vnic;
+
+ use_ntuple = bnxt_filter_type_check(pattern, error);
+ PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
+ if (use_ntuple < 0)
+ return use_ntuple;
+
+ filter->filter_type = use_ntuple ?
+ HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
+ en_ethertype = use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
+ EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (item->last) {
+ /* last or range is NOT supported as match criteria */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "No support for range");
+ return -rte_errno;
+ }
+
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "spec/mask is NULL");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ /* Source and destination MAC address masks must not be
+ * partially set: each must be either all 0's or all 1's.
+ */
+ if ((!is_zero_ether_addr(&eth_mask->src) &&
+ !is_broadcast_ether_addr(&eth_mask->src)) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "MAC_addr mask not valid");
+ return -rte_errno;
+ }
+
+ /* A partial ethertype mask is not allowed; only exact matches are. */
+ if (eth_mask->type &&
+ eth_mask->type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ethertype mask not valid");
+ return -rte_errno;
+ }
+
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ rte_memcpy(filter->dst_macaddr,
+ &eth_spec->dst, 6);
+ en |= use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
+ EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
+ }
+
+ if (is_broadcast_ether_addr(&eth_mask->src)) {
+ rte_memcpy(filter->src_macaddr,
+ &eth_spec->src, 6);
+ en |= use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
+ EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
+ } /*
+ * else {
+ * PMD_DRV_LOG(ERR, "Handle this condition\n");
+ * }
+ */
+ if (eth_mask->type) {
+ filter->ethertype =
+ rte_be_to_cpu_16(eth_spec->type);
+ en |= en_ethertype;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ if (en & en_ethertype) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN TPID matching is not"
+ " supported");
+ return -rte_errno;
+ }
+ if (vlan_mask->tci &&
+ vlan_mask->tci == RTE_BE16(0x0fff)) {
+ /* Only the VLAN ID can be matched. */
+ filter->l2_ovlan =
+ rte_be_to_cpu_16(vlan_spec->tci &
+ RTE_BE16(0x0fff));
+ en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
+ } else {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN mask is invalid");
+ return -rte_errno;
+ }
+ if (vlan_mask->inner_type &&
+ vlan_mask->inner_type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "inner ethertype mask not"
+ " valid");
+ return -rte_errno;
+ }
+ if (vlan_mask->inner_type) {
+ filter->ethertype =
+ rte_be_to_cpu_16(vlan_spec->inner_type);
+ en |= en_ethertype;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ /* If mask is not involved, we could use EM filters. */
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+ /* Only IP DST and SRC fields are maskable. */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
+ filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
+ EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
+
+ if (ipv4_mask->hdr.src_addr) {
+ filter->src_ipaddr_mask[0] =
+ ipv4_mask->hdr.src_addr;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ }
+
+ if (ipv4_mask->hdr.dst_addr) {
+ filter->dst_ipaddr_mask[0] =
+ ipv4_mask->hdr.dst_addr;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ }
+
+ filter->ip_addr_type = use_ntuple ?
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+
+ if (ipv4_spec->hdr.next_proto_id) {
+ filter->ip_protocol =
+ ipv4_spec->hdr.next_proto_id;
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+
+ /* Only IP DST and SRC fields are maskable. */
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask.");
+ return -rte_errno;
+ }
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
+ EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
+
+ rte_memcpy(filter->src_ipaddr,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->dst_ipaddr,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
+ 16)) {
+ rte_memcpy(filter->src_ipaddr_mask,
+ ipv6_mask->hdr.src_addr, 16);
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ }
+
+ if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
+ 16)) {
+ rte_memcpy(filter->dst_ipaddr_mask,
+ ipv6_mask->hdr.dst_addr, 16);
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ }
+
+ filter->ip_addr_type = use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
+ EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ /* Check TCP mask. Only DST & SRC ports are maskable */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
+ EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
+
+ if (tcp_mask->hdr.dst_port) {
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ }
+
+ if (tcp_mask->hdr.src_port) {
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ filter->src_port = udp_spec->hdr.src_port;
+ filter->dst_port = udp_spec->hdr.dst_port;
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
+ EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
+
+ if (udp_mask->hdr.dst_port) {
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ }
+
+ if (udp_mask->hdr.src_port) {
+ filter->src_port_mask = udp_mask->hdr.src_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
+ /* Check if VXLAN item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
+ vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
+ vxlan_spec->flags != 0x8) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_spec && vxlan_mask) {
+ vni_masked =
+ !!memcmp(vxlan_mask->vni, vni_mask,
+ RTE_DIM(vni_mask));
+ if (vni_masked) {
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->vni =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec = item->spec;
+ nvgre_mask = item->mask;
+ /* Check if NVGRE item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
+ nvgre_spec->protocol != 0x6558) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec && nvgre_mask) {
+ tni_masked =
+ !!memcmp(nvgre_mask->tni, tni_mask,
+ RTE_DIM(tni_mask));
+ if (tni_masked) {
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TNI mask");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->vni =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = item->spec;
+ vf = vf_spec->id;
+
+ if (!BNXT_PF(bp)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Configuring on a VF!");
+ return -rte_errno;
+ }
+
+ if (vf >= bp->pdev->max_vfs) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Incorrect VF id!");
+ return -rte_errno;
+ }
+
+ if (!attr->transfer) {
+ rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Matching VF traffic without"
+ " affecting it (transfer attribute)"
+ " is unsupported");
+ return -rte_errno;
+ }
+
+ filter->mirror_vnic_id =
+ dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+ if (dflt_vnic < 0) {
+ /* A negative value means no driver is loaded
+ * on the VF, so there is no default VNIC to
+ * mirror to; reject the rule.
+ */
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unable to get default VNIC for VF");
+ return -rte_errno;
+ }
+
+ filter->mirror_vnic_id = dflt_vnic;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
+ break;
+ default:
+ break;
+ }
+ item++;
+ }
+ filter->enables = en;
+
+ return 0;
+}
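
The VXLAN and NVGRE branches above share one byte-packing trick: the three network-order VNI/TNI bytes are copied into bits 23..0 of a big-endian 32-bit scratch word, which is then converted to host order. A standalone sketch (the helper name and sample VNI value are illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <rte_byteorder.h>

    static uint32_t
    vni_to_host_u32(const uint8_t vni[3])
    {
        uint32_t tenant_id_be = 0;

        /* Byte 0 (bits 31..24) stays zero; bytes 1..3 take the VNI. */
        memcpy((uint8_t *)&tenant_id_be + 1, vni, 3);
        return rte_be_to_cpu_32(tenant_id_be);
    }

    /* vni = {0x00, 0x12, 0x34} -> 0x00001234 */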
+
+/* Parse attributes */
+static int
+bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr,
+ "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr,
+ "No support for egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr,
+ "No support for priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr,
+ "No support for group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
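
Only one attribute combination survives these checks; everything else fails with a specific error type. The single shape that passes, as a sketch:

    struct rte_flow_attr attr = {
        .group    = 0,  /* non-zero group rejected */
        .priority = 0,  /* non-zero priority rejected */
        .ingress  = 1,  /* mandatory */
        .egress   = 0,  /* egress rejected */
    };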
+
+struct bnxt_filter_info *
+bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
+ struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter1, *f0;
+ struct bnxt_vnic_info *vnic0;
+ int rc;
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ f0 = STAILQ_FIRST(&vnic0->filter);
+
+ /* This flow has the same DST MAC as the port/L2 filter. */
+ if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
+ return f0;
+
+ /* This flow needs a DST MAC that differs from the port/L2 filter. */
+ PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
+ filter1 = bnxt_get_unused_filter(bp);
+ if (filter1 == NULL)
+ return NULL;
+
+ filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
+ memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
+ memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
+ filter1);
+ if (rc) {
+ bnxt_free_filter(bp, filter1);
+ return NULL;
+ }
+ return filter1;
+}
+
+static int
+bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error,
+ struct bnxt_filter_info *filter)
+{
+ const struct rte_flow_action *act =
+ bnxt_flow_non_void_action(actions);
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_vf *act_vf;
+ struct bnxt_vnic_info *vnic, *vnic0;
+ struct bnxt_filter_info *filter1;
+ uint32_t vf = 0;
+ int dflt_vnic;
+ int rc;
+
+ if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Cannot create flow on RSS queues");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ rc =
+ bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
+ if (rc != 0)
+ goto ret;
+
+ rc = bnxt_flow_parse_attr(attr, error);
+ if (rc != 0)
+ goto ret;
+
+ /* Only the ingress attribute is supported right now. */
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
+
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ /* Allow this flow. Redirect to a VNIC. */
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ if (act_q->index >= bp->rx_nr_rings) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Invalid queue ID.");
+ rc = -rte_errno;
+ goto ret;
+ }
+ PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
+ if (vnic == NULL) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "No matching VNIC for queue ID.");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ filter->dst_id = vnic->fw_vnic_id;
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ PMD_DRV_LOG(DEBUG, "VNIC found\n");
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ filter->flags =
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
+ else
+ filter->flags =
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VF:
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ vf = act_vf->id;
+
+ if (!BNXT_PF(bp)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Configuring on a VF!");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ if (vf >= bp->pdev->max_vfs) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Incorrect VF id!");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ filter->mirror_vnic_id =
+ dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+ if (dflt_vnic < 0) {
+ /* A negative value means no driver is loaded on
+ * the VF, so there is no default VNIC to mirror
+ * to; reject the rule.
+ */
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Unable to get default VNIC for VF");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ filter->mirror_vnic_id = dflt_vnic;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Invalid action.");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ if (filter1) {
+ bnxt_free_filter(bp, filter1);
+ /* The struct remains valid on the free list; mark its
+ * firmware ID invalid for the next user.
+ */
+ filter1->fw_l2_filter_id = UINT64_MAX;
+ }
+
+ act = bnxt_flow_non_void_action(++act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Invalid action.");
+ rc = -rte_errno;
+ goto ret;
+ }
+ret:
+ return rc;
+}
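
In summary, each supported action resolves to an L2 filter reference plus a per-action destination or flag on the filter (condensed from the switch above):

    /* QUEUE -> dst_id = fw_vnic_id of the VNIC backing the queue
     * DROP  -> ..._FLAGS_DROP on the EM or ntuple filter
     * COUNT -> HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER
     * VF    -> mirror_vnic_id = the target VF's default VNIC
     * all   -> fw_l2_filter_id taken from bnxt_get_l2_filter()
     */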
+
+static int
+bnxt_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter;
+ int ret = 0;
+
+ ret = bnxt_flow_args_validate(attr, pattern, actions, error);
+ if (ret != 0)
+ return ret;
+
+ filter = bnxt_get_unused_filter(bp);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
+ return -ENOMEM;
+ }
+
+ ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
+ error, filter);
+ /* No need to hold on to this filter if we are just validating flow */
+ filter->fw_l2_filter_id = UINT64_MAX;
+ bnxt_free_filter(bp, filter);
+
+ return ret;
+}
+
+static int
+bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
+{
+ struct bnxt_filter_info *mf;
+ struct rte_flow *flow;
+ int i;
+
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+ mf = flow->filter;
+
+ if (mf->filter_type == nf->filter_type &&
+ mf->flags == nf->flags &&
+ mf->src_port == nf->src_port &&
+ mf->src_port_mask == nf->src_port_mask &&
+ mf->dst_port == nf->dst_port &&
+ mf->dst_port_mask == nf->dst_port_mask &&
+ mf->ip_protocol == nf->ip_protocol &&
+ mf->ip_addr_type == nf->ip_addr_type &&
+ mf->ethertype == nf->ethertype &&
+ mf->vni == nf->vni &&
+ mf->tunnel_type == nf->tunnel_type &&
+ mf->l2_ovlan == nf->l2_ovlan &&
+ mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
+ mf->l2_ivlan == nf->l2_ivlan &&
+ mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
+ !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
+ !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_macaddr, nf->src_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->dst_macaddr, nf->dst_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_ipaddr, nf->src_ipaddr,
+ sizeof(nf->src_ipaddr)) &&
+ !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
+ sizeof(nf->src_ipaddr_mask)) &&
+ !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
+ sizeof(nf->dst_ipaddr)) &&
+ !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
+ sizeof(nf->dst_ipaddr_mask))) {
+ if (mf->dst_id == nf->dst_id)
+ return -EEXIST;
+ /*
+ * Same Flow, Different queue
+ * Clear the old ntuple filter
+ * Reuse the matching L2 filter
+ * ID for the new filter
+ */
+ nf->fw_l2_filter_id = mf->fw_l2_filter_id;
+ if (nf->filter_type == HWRM_CFA_EM_FILTER)
+ bnxt_hwrm_clear_em_filter(bp, mf);
+ if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ bnxt_hwrm_clear_ntuple_filter(bp, mf);
+ /* Free the old filter, update flow
+ * with new filter
+ */
+ bnxt_free_filter(bp, mf);
+ flow->filter = nf;
+ return -EXDEV;
+ }
+ }
+ }
+ return 0;
+}
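
bnxt_match_filter() encodes three outcomes in its return value, and the callers in this file branch on them directly:

    /*  0      -> no existing flow matches; create a new one
     * -EEXIST -> an identical flow (same pattern and destination) exists
     * -EXDEV  -> same pattern, different destination; the old HW filter
     *            has been cleared and the flow now points at nf
     */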
+
+static struct rte_flow *
+bnxt_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter;
+ struct bnxt_vnic_info *vnic = NULL;
+ bool update_flow = false;
+ struct rte_flow *flow;
+ unsigned int i;
+ int ret = 0;
+
+ flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return flow;
+ }
+
+ ret = bnxt_flow_args_validate(attr, pattern, actions, error);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Not a validate flow.\n");
+ goto free_flow;
+ }
+
+ filter = bnxt_get_unused_filter(bp);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
+ goto free_flow;
+ }
+
+ ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
+ error, filter);
+ if (ret != 0)
+ goto free_filter;
+
+ ret = bnxt_match_filter(bp, filter);
+ if (ret == -EEXIST) {
+ PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
+ /* Clear the filter that was created as part of
+ * validate_and_parse_flow() above
+ */
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+ goto free_filter;
+ } else if (ret == -EXDEV) {
+ PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
+ PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
+ update_flow = true;
+ }
+
+ if (filter->filter_type == HWRM_CFA_EM_FILTER) {
+ filter->enables |=
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
+ }
+
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
+ filter->enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
+ }
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ if (filter->dst_id == vnic->fw_vnic_id)
+ break;
+ }
+
+ if (!ret) {
+ flow->filter = filter;
+ flow->vnic = vnic;
+ if (update_flow) {
+ ret = -EXDEV;
+ goto free_flow;
+ }
+ PMD_DRV_LOG(ERR, "Successfully created flow.\n");
+ STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
+ return flow;
+ }
+free_filter:
+ bnxt_free_filter(bp, filter);
+free_flow:
+ if (ret == -EEXIST)
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Matching Flow exists.");
+ else if (ret == -EXDEV)
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Flow with pattern exists, updating destination queue");
+ else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ flow = NULL;
+ return flow;
+}
+
+static int
+bnxt_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter = flow->filter;
+ struct bnxt_vnic_info *vnic = flow->vnic;
+ int ret = 0;
+
+ ret = bnxt_match_filter(bp, filter);
+ if (ret == 0)
+ PMD_DRV_LOG(ERR, "Could not find matching flow\n");
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ ret = bnxt_hwrm_clear_em_filter(bp, filter);
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ else
+ ret = bnxt_hwrm_clear_l2_filter(bp, filter);
+ if (!ret) {
+ STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
+ rte_free(flow);
+ } else {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+ }
+
+ return ret;
+}
+
+static int
+bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+ struct rte_flow *flow;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+ struct bnxt_filter_info *filter = flow->filter;
+
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ ret = bnxt_hwrm_clear_em_filter(bp, filter);
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+
+ if (ret) {
+ rte_flow_error_set
+ (error,
+ -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Failed to flush flow in HW.");
+ return -rte_errno;
+ }
+
+ STAILQ_REMOVE(&vnic->flow_list, flow,
+ rte_flow, next);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
+
+const struct rte_flow_ops bnxt_flow_ops = {
+ .validate = bnxt_flow_validate,
+ .create = bnxt_flow_create,
+ .destroy = bnxt_flow_destroy,
+ .flush = bnxt_flow_flush,
+};
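
These callbacks are reached through the generic rte_flow API once the PMD hands back bnxt_flow_ops via the generic filter_ctrl path. A minimal caller-side sketch, assuming a started port whose RX mode is not RSS (the helper name, port and queue numbers are illustrative):

    #include <rte_flow.h>

    static struct rte_flow *
    redirect_to_queue(uint16_t port_id, uint16_t queue_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec = { 0 };
        struct rte_flow_item_eth eth_mask = { 0 };  /* all-zero: wildcard */
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH,
              .spec = &eth_spec, .mask = &eth_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = queue_id };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        if (rte_flow_validate(port_id, &attr, pattern, actions, &error))
            return NULL;
        return rte_flow_create(port_id, &attr, pattern, actions, &error);
    }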
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d6fdc1b8..c682488a 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -166,10 +166,26 @@ err_ret:
req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)
+#define HWRM_CHECK_RESULT_SILENT() do {\
+ if (rc) { \
+ rte_spinlock_unlock(&bp->hwrm_lock); \
+ return rc; \
+ } \
+ if (resp->error_code) { \
+ rc = rte_le_to_cpu_16(resp->error_code); \
+ rte_spinlock_unlock(&bp->hwrm_lock); \
+ return rc; \
+ } \
+} while (0)
+
#define HWRM_CHECK_RESULT() do {\
if (rc) { \
PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
return rc; \
} \
if (resp->error_code) { \
@@ -188,6 +204,10 @@ err_ret:
PMD_DRV_LOG(ERR, "error %d\n", rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
return rc; \
} \
} while (0)
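
The silent variant exists for probing requests that are expected to fail; this same patch uses it in bnxt_hwrm_func_reserve_vf_resc(). The intended call pattern, as a sketch:

    rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
    if (test)
        HWRM_CHECK_RESULT_SILENT();  /* probe: fail quietly, no log */
    else
        HWRM_CHECK_RESULT();         /* real request: log and map rc */
    HWRM_UNLOCK();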
@@ -376,13 +396,13 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
req.l2_ovlan = filter->l2_ovlan;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
- req.l2_ovlan = filter->l2_ivlan;
+ req.l2_ivlan = filter->l2_ivlan;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
req.l2_ovlan_mask = filter->l2_ovlan_mask;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
- req.l2_ovlan_mask = filter->l2_ivlan_mask;
+ req.l2_ivlan_mask = filter->l2_ivlan_mask;
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
req.src_id = rte_cpu_to_le_32(filter->src_id);
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
@@ -506,6 +526,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (BNXT_PF(bp)) {
bp->pf.port_id = resp->port_id;
bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+ bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
new_max_vfs = bp->pdev->max_vfs;
if (new_max_vfs != bp->pf.max_vfs) {
if (bp->pf.vf_info)
@@ -657,9 +678,19 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
return rc;
}
-int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp)
+int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
+{
+ if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
+ return 0;
+
+ return bnxt_hwrm_func_reserve_vf_resc(bp, true);
+}
+
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
int rc;
+ uint32_t flags = 0;
+ uint32_t enables;
struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_vf_cfg_input req = {0};
@@ -670,7 +701,8 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp)
HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
- HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);
req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
@@ -679,10 +711,35 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp)
req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
bp->tx_nr_rings);
req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+ req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
+ if (bp->vf_resv_strategy ==
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+ enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+ req.enables |= rte_cpu_to_le_32(enables);
+ req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
+ req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
+ req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
+ }
+
+ if (test)
+ flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
+
+ req.flags = rte_cpu_to_le_32(flags);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT();
+ if (test)
+ HWRM_CHECK_RESULT_SILENT();
+ else
+ HWRM_CHECK_RESULT();
+
HWRM_UNLOCK();
return rc;
}
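
When test is true, the *_ASSETS_TEST flags ask the firmware to verify that the requested ring, VNIC and context counts could be reserved without actually committing them, which is why the result is checked silently. A sketch of a caller, assuming a configure-time capacity check (the actual call site is outside this hunk; the helper name is from this patch):

    /* e.g. from dev_configure on a VF running with the new
     * resource manager (BNXT_FLAG_NEW_RM)
     */
    if (bnxt_hwrm_check_vf_rings(bp)) {
        PMD_DRV_LOG(ERR, "Not enough VF resources for requested rings\n");
        return -ENOSPC;
    }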
@@ -710,6 +767,11 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
}
+ bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
+ if (bp->vf_resv_strategy >
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
+ bp->vf_resv_strategy =
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
HWRM_UNLOCK();
return rc;
@@ -1265,8 +1327,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
/* map ring groups to this vnic */
PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
- for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
+ for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
+
vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
@@ -1559,6 +1622,11 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint16_t size;
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+ }
+
HWRM_PREP(req, VNIC_PLCMODES_CFG);
req.flags = rte_cpu_to_le_32(
@@ -1816,8 +1884,7 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
return rc;
}
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
- unsigned int idx __rte_unused)
+static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
@@ -1829,17 +1896,52 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->cp_raw_cons = 0;
}
+void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
+{
+ struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
+ memset(rxr->rx_desc_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_desc_ring));
+ memset(rxr->rx_buf_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_buf_ring));
+ rxr->rx_prod = 0;
+ }
+ ring = rxr->ag_ring_struct;
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ memset(rxr->ag_buf_ring, 0,
+ rxr->ag_ring_struct->ring_size *
+ sizeof(*rxr->ag_buf_ring));
+ rxr->ag_prod = 0;
+ bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ bnxt_free_cp_ring(bp, cpr);
+
+ bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
+}
+
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
unsigned int i;
- int rc = 0;
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
struct bnxt_tx_queue *txq = bp->tx_queues[i];
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct bnxt_ring *ring = txr->tx_ring_struct;
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
- unsigned int idx = bp->rx_cp_nr_rings + i;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
@@ -1855,59 +1957,15 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
txr->tx_cons = 0;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_free_cp_ring(bp, cpr, idx);
- cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
-
- for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- struct bnxt_rx_queue *rxq = bp->rx_queues[i];
- struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
- struct bnxt_ring *ring = rxr->rx_ring_struct;
- struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_hwrm_ring_free(bp, ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_RX);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
- memset(rxr->rx_desc_ring, 0,
- rxr->rx_ring_struct->ring_size *
- sizeof(*rxr->rx_desc_ring));
- memset(rxr->rx_buf_ring, 0,
- rxr->rx_ring_struct->ring_size *
- sizeof(*rxr->rx_buf_ring));
- rxr->rx_prod = 0;
- }
- ring = rxr->ag_ring_struct;
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_hwrm_ring_free(bp, ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_RX);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- memset(rxr->ag_buf_ring, 0,
- rxr->ag_ring_struct->ring_size *
- sizeof(*rxr->ag_buf_ring));
- rxr->ag_prod = 0;
- bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
- }
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_free_cp_ring(bp, cpr, i);
- bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+ bnxt_free_cp_ring(bp, cpr);
cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
}
}
- /* Default completion ring */
- {
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
-
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_free_cp_ring(bp, cpr, 0);
- cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
+ for (i = 0; i < bp->rx_cp_nr_rings; i++)
+ bnxt_free_hwrm_rx_ring(bp, i);
- return rc;
+ return 0;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
@@ -1970,6 +2028,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
else
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+ STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
//if (rc)
//break;
}
@@ -2057,6 +2116,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
bnxt_hwrm_vnic_free(bp, vnic);
+
+ rte_free(vnic->fw_grp_ids);
}
/* Ring resources */
bnxt_free_all_hwrm_rings(bp);
@@ -3151,7 +3212,9 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
struct bnxt_pf_info *pf = &bp->pf;
int rc;
- if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+ /* Not allowed on NS2 device, NPAR, MultiHost, VF */
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
+ BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
return 0;
HWRM_PREP(req, PORT_CLR_STATS);
@@ -3298,13 +3361,12 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT();
- HWRM_UNLOCK();
-
if (rc == 0)
memcpy(data, buf, len > buflen ? buflen : len);
rte_free(buf);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -3336,12 +3398,13 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
req.offset = rte_cpu_to_le_32(offset);
req.len = rte_cpu_to_le_32(length);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT();
- HWRM_UNLOCK();
if (rc == 0)
memcpy(data, buf, length);
rte_free(buf);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
return rc;
}
@@ -3372,14 +3435,6 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
rte_iova_t dma_handle;
uint8_t *buf;
- HWRM_PREP(req, NVM_WRITE);
-
- req.dir_type = rte_cpu_to_le_16(dir_type);
- req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
- req.dir_ext = rte_cpu_to_le_16(dir_ext);
- req.dir_attr = rte_cpu_to_le_16(dir_attr);
- req.dir_data_length = rte_cpu_to_le_32(data_len);
-
buf = rte_malloc("nvm_write", data_len, 0);
rte_mem_lock_page(buf);
if (!buf)
@@ -3392,14 +3447,22 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
return -ENOMEM;
}
memcpy(buf, data, data_len);
+
+ HWRM_PREP(req, NVM_WRITE);
+
+ req.dir_type = rte_cpu_to_le_16(dir_type);
+ req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
+ req.dir_ext = rte_cpu_to_le_16(dir_ext);
+ req.dir_attr = rte_cpu_to_le_16(dir_attr);
+ req.dir_data_length = rte_cpu_to_le_32(data_len);
req.host_src_addr = rte_cpu_to_le_64(dma_handle);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rte_free(buf);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- rte_free(buf);
return rc;
}
@@ -3800,7 +3863,6 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
HWRM_UNLOCK();
filter->fw_ntuple_filter_id = UINT64_MAX;
- filter->fw_l2_filter_id = UINT64_MAX;
return 0;
}
@@ -3832,3 +3894,54 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
}
return 0;
}
+
+static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ uint16_t flags;
+
+ req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
+
+ /* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
+ req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
+
+ /* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
+ req->num_cmpl_dma_aggr_during_int =
+ rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
+
+ req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
+
+ /* min timer set to 1/2 of interrupt timer */
+ req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
+
+ /* buf timer set to 1/4 of interrupt timer */
+ req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
+
+ req->cmpl_aggr_dma_tmr_during_int =
+ rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
+
+ flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
+ req->flags = rte_cpu_to_le_16(flags);
+}
+
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
+ struct bnxt_coal *coal, uint16_t ring_id)
+{
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* Set ring coalesce parameters only for Stratus 100G NIC */
+ if (!bnxt_stratus_device(bp))
+ return 0;
+
+ HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
+ bnxt_hwrm_set_coal_params(coal, &req);
+ req.ring_id = rte_cpu_to_le_16(ring_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ return 0;
+}
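
bnxt_hwrm_set_ring_coal() returns 0 early on non-Stratus devices, so callers can invoke it unconditionally for every completion ring. The bnxt_ring.c hunk later in this patch does exactly that:

    bnxt_init_dflt_coal(&coal);
    ...
    bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);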
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 60a4ab16..379aac6e 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -29,6 +29,9 @@ struct bnxt_cp_ring_info;
#define HWRM_QUEUE_SERVICE_PROFILE_LOSSY \
HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY
+#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC \
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
@@ -107,12 +110,13 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
void bnxt_free_all_hwrm_resources(struct bnxt *bp);
void bnxt_free_hwrm_resources(struct bnxt *bp);
+void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index);
int bnxt_alloc_hwrm_resources(struct bnxt *bp);
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
int bnxt_hwrm_func_qcfg(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp);
-int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp);
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test);
int bnxt_hwrm_allocate_pf_only(struct bnxt *bp);
int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs);
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf,
@@ -167,4 +171,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
int bnxt_hwrm_ptp_cfg(struct bnxt *bp);
int bnxt_vnic_rss_configure(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
+ struct bnxt_coal *coal, uint16_t ring_id);
+int bnxt_hwrm_check_vf_rings(struct bnxt *bp);
#endif
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index bb9f6d1c..fcbd6bc6 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -258,6 +258,116 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
return 0;
}
+static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
+{
+ /* Tick values in microseconds.
+ * 1 coal_buf x bufs_per_record = 1 completion record.
+ */
+ coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
+ /* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
+ coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
+ /* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
+ coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
+ coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
+ /* min timer set to 1/2 of interrupt timer */
+ coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
+ /* buf timer set to 1/4 of interrupt timer */
+ coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
+ coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
+}
+
+int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
+{
+ struct rte_pci_device *pci_dev = bp->pdev;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
+ int rc = 0;
+
+ bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
+
+ /* Rx cmpl */
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ queue_index, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+
+ cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
+ queue_index * BNXT_DB_SIZE;
+ bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+ if (!queue_index) {
+ /*
+ * In order to save completion resources, use the first
+ * completion ring from PF or VF as the default completion ring
+ * for async event and HWRM forward response handling.
+ */
+ bp->def_cp_ring = cpr;
+ rc = bnxt_hwrm_set_async_event_cr(bp);
+ if (rc)
+ goto err_out;
+ }
+ /* Rx ring */
+ rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ queue_index, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
+ if (rc)
+ goto err_out;
+
+ rxr->rx_prod = 0;
+ rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
+ queue_index * BNXT_DB_SIZE;
+ bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+
+ ring = rxr->ag_ring_struct;
+ /* Agg ring */
+ if (!ring)
+ PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
+
+ rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ map_idx, HWRM_NA_SIGNATURE,
+ cp_ring->fw_ring_id);
+ if (rc)
+ goto err_out;
+
+ PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
+ rxr->ag_prod = 0;
+ rxr->ag_doorbell = (char *)pci_dev->mem_resource[2].addr +
+ map_idx * BNXT_DB_SIZE;
+ bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
+
+ if (bp->eth_dev->data->rx_queue_state[queue_index] ==
+ RTE_ETH_QUEUE_STATE_STARTED) {
+ if (bnxt_init_one_rx_ring(rxq)) {
+ PMD_DRV_LOG(ERR,
+ "bnxt_init_one_rx_ring failed!\n");
+ bnxt_rx_queue_release_op(rxq);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+ }
+ rxq->index = queue_index;
+ PMD_DRV_LOG(INFO,
+ "queue %d, rx_deferred_start %d, state %d!\n",
+ queue_index, rxq->rx_deferred_start,
+ bp->eth_dev->data->rx_queue_state[queue_index]);
+
+err_out:
+ return rc;
+}
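
The doorbell offsets follow the ring-group layout described in the comment below: the completion and RX doorbells use the queue index directly, while the aggregation doorbell is displaced past all RX completion rings via map_idx. Illustrative layout, assuming rx_cp_nr_rings = 4:

    /* BAR 2 offset = index * BNXT_DB_SIZE
     * queue 0: cp/rx doorbells at index 0, agg doorbell at index 4
     * queue 1: cp/rx doorbells at index 1, agg doorbell at index 5
     * ...
     */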
/* ring_grp usage:
* [0] = default completion ring
* [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
@@ -265,9 +375,12 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
*/
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
+ struct bnxt_coal coal;
unsigned int i;
int rc = 0;
+ bnxt_init_dflt_coal(&coal);
+
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@@ -291,6 +404,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
cpr->cp_doorbell = (char *)bp->doorbell_base + i * 0x80;
bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
if (!i) {
/*
@@ -379,6 +493,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
txr->tx_doorbell = (char *)bp->doorbell_base + idx * 0x80;
txq->index = idx;
+ bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
}
err_out:
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index 65bf3e2f..1446d784 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -70,6 +70,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
struct bnxt_rx_queue *rxq,
struct bnxt_cp_ring_info *cp_ring_info,
const char *suffix);
+int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index);
int bnxt_alloc_hwrm_rings(struct bnxt *bp);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index c55ddec4..832fc9ec 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -199,12 +199,14 @@ err_out:
return rc;
}
-static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
+void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
struct bnxt_sw_rx_bd *sw_ring;
struct bnxt_tpa_info *tpa_info;
uint16_t i;
+ rte_spinlock_lock(&rxq->lock);
+
if (rxq) {
sw_ring = rxq->rx_ring->rx_buf_ring;
if (sw_ring) {
@@ -239,6 +241,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
}
}
}
+
+ rte_spinlock_unlock(&rxq->lock);
}
void bnxt_free_rx_mbufs(struct bnxt *bp)
@@ -286,6 +290,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
struct bnxt_rx_queue *rxq;
int rc = 0;
+ uint8_t queue_state;
if (queue_idx >= bp->max_rx_rings) {
PMD_DRV_LOG(ERR,
@@ -326,8 +331,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
- rxq->crc_len = rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP ?
- 0 : ETHER_CRC_LEN;
+ rxq->crc_len = rte_eth_dev_must_keep_crc(rx_offloads) ?
+ ETHER_CRC_LEN : 0;
eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
@@ -341,6 +346,11 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
}
rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ queue_state = rxq->rx_deferred_start ? RTE_ETH_QUEUE_STATE_STOPPED :
+ RTE_ETH_QUEUE_STATE_STARTED;
+ eth_dev->data->rx_queue_state[queue_idx] = queue_state;
+ rte_spinlock_init(&rxq->lock);
out:
return rc;
}
@@ -389,6 +399,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
struct bnxt_vnic_info *vnic = NULL;
+ int rc = 0;
if (rxq == NULL) {
PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
@@ -396,28 +407,47 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- rxq->rx_deferred_start = false;
+
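+ /* Re-create the HW ring so a restarted queue begins from a clean state. */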
+ bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
+ bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
+
if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
return 0;
- PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
- vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+
+ PMD_DRV_LOG(DEBUG,
+ "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id].fw_grp_id);
+
vnic->fw_grp_ids[rx_queue_id] =
- bp->grp_info[rx_queue_id + 1].fw_grp_id;
- return bnxt_vnic_rss_configure(bp, vnic);
+ bp->grp_info[rx_queue_id].fw_grp_id;
+ rc = bnxt_vnic_rss_configure(bp, vnic);
}
- return 0;
+ if (rc == 0)
+ rxq->rx_deferred_start = false;
+
+ return rc;
}
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
- struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
struct bnxt_vnic_info *vnic = NULL;
+ struct bnxt_rx_queue *rxq = NULL;
+ int rc = 0;
+
+ /* Rx CQ 0 also works as Default CQ for async notifications */
+ if (!rx_queue_id) {
+ PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ rxq = bp->rx_queues[rx_queue_id];
if (rxq == NULL) {
PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
@@ -431,7 +461,11 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
- return bnxt_vnic_rss_configure(bp, vnic);
+ rc = bnxt_vnic_rss_configure(bp, vnic);
}
- return 0;
+
+ if (rc == 0)
+ bnxt_rx_queue_release_mbufs(rxq);
+
+ return rc;
}
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 8307f603..e5d6001d 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -10,6 +10,9 @@ struct bnxt;
struct bnxt_rx_ring_info;
struct bnxt_cp_ring_info;
struct bnxt_rx_queue {
+ rte_spinlock_t lock; /* Synchronize between rx_queue_stop
+ * and fast path
+ */
struct rte_mempool *mb_pool; /* mbuf pool for RX ring */
struct rte_mbuf *pkt_first_seg; /* 1st seg of pkt */
struct rte_mbuf *pkt_last_seg; /* Last seg of pkt */
@@ -54,4 +57,5 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
+void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 9d884292..c7bc8848 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -540,8 +540,10 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
int rc = 0;
bool evt = false;
- /* If Rx Q was stopped return */
- if (rxq->rx_deferred_start)
+ /* If Rx Q was stopped return. RxQ0 cannot be stopped. */
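+ /* Queue 0 also serves as the default completion ring and cannot be
+ * stopped, so it is always polled even if the lock is contended.
+ */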
+ if (unlikely(((rxq->rx_deferred_start ||
+ !rte_spinlock_trylock(&rxq->lock)) &&
+ rxq->queue_id)))
return 0;
/* Handle RX burst request */
@@ -572,18 +574,20 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
raw_cons = NEXT_RAW_CMP(raw_cons);
if (nb_rx_pkts == nb_pkts || evt)
break;
+ /* Post some Rx buffers early while processing a large burst */
+ if (nb_rx_pkts == BNXT_RX_POST_THRESH)
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
}
cpr->cp_raw_cons = raw_cons;
- if ((prod == rxr->rx_prod && ag_prod == rxr->ag_prod) && !evt) {
+ if (!nb_rx_pkts && !evt) {
/*
* For PMD, there is no need to keep on pushing to REARM
* the doorbell if there are no new completions
*/
- return nb_rx_pkts;
+ goto done;
}
- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
if (prod != rxr->rx_prod)
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
@@ -591,6 +595,8 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
if (ag_prod != rxr->ag_prod)
B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
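+ /* Ring the completion doorbell only after the Rx and AGG producer
+ * doorbells have been written.
+ */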
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
/* Attempt to alloc Rx buf in case of a previous allocation failure. */
if (rc == -ENOMEM) {
int i;
@@ -614,16 +620,22 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
}
}
+done:
+ rte_spinlock_unlock(&rxq->lock);
+
return nb_rx_pkts;
}
void bnxt_free_rx_rings(struct bnxt *bp)
{
int i;
+ struct bnxt_rx_queue *rxq;
- for (i = 0; i < (int)bp->rx_nr_rings; i++) {
- struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ if (!bp->rx_queues)
+ return;
+ for (i = 0; i < (int)bp->rx_nr_rings; i++) {
+ rxq = bp->rx_queues[i];
if (!rxq)
continue;
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index 5b28f032..3815a219 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -54,6 +54,8 @@
#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \
!((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS)
+#define BNXT_RX_POST_THRESH 32
+
enum pkt_hash_types {
PKT_HASH_TYPE_NONE, /* Undefined type */
PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index bbd4e78b..a5d3c866 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -278,6 +278,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
count = 0;
for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
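+ /* Assign a sequential id to each xstat so callers can correlate
+ * values with the names reported by xstats_get_names.
+ */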
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)rx_stats +
bnxt_rx_stats_strings[i].offset));
@@ -286,6 +287,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)tx_stats +
bnxt_tx_stats_strings[i].offset));
@@ -293,6 +295,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
}
/* The Tx drop pkts aka the Anti spoof counter */
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
count++;
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index 720ca90c..f2c712a7 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -24,6 +24,7 @@ struct bnxt_tx_queue {
uint8_t wthresh; /* Write-back threshold reg */
uint32_t ctx_curr; /* Hardware context states */
uint8_t tx_deferred_start; /* not in global dev start */
+ uint8_t cmpl_next; /* Next BD to trigger a compl */
struct bnxt *bp;
int index;
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 470fddd5..67bb35e0 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -114,7 +114,9 @@ static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
}
static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
- struct bnxt_tx_queue *txq)
+ struct bnxt_tx_queue *txq,
+ uint16_t *coal_pkts,
+ uint16_t *cmpl_next)
{
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct tx_bd_long *txbd;
@@ -133,7 +135,9 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
- PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM))
+ PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
+ PKT_TX_TUNNEL_GENEVE))
long_bd = true;
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
@@ -146,14 +150,21 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
return -ENOMEM;
txbd = &txr->tx_desc_ring[txr->tx_prod];
- txbd->opaque = txr->tx_prod;
+ txbd->opaque = *coal_pkts;
txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
+ txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
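+ /* Suppress the completion (NO_CMPL) for this BD unless one was
+ * explicitly requested; a requested completion acknowledges all
+ * packets coalesced so far, counted in the opaque field.
+ */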
+ if (!*cmpl_next) {
+ txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
+ } else {
+ *coal_pkts = 0;
+ *cmpl_next = false;
+ }
txbd->len = tx_pkt->data_len;
- if (txbd->len >= 2014)
+ if (tx_pkt->pkt_len >= 2014)
txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
else
- txbd->flags_type |= lhint_arr[txbd->len >> 9];
- txbd->address = rte_cpu_to_le_32(rte_mbuf_data_iova(tx_buf->mbuf));
+ txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
+ txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf));
if (long_bd) {
txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
@@ -194,16 +205,46 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* Outer IP, Inner IP, Inner TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
+ PKT_TX_OIP_IIP_TCP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
+ PKT_TX_OIP_IIP_UDP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
PKT_TX_IIP_TCP_UDP_CKSUM) {
/* (Inner) IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
+ PKT_TX_IIP_UDP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
+ PKT_TX_IIP_TCP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
PKT_TX_OIP_TCP_UDP_CKSUM) {
/* Outer IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
+ PKT_TX_OIP_UDP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
+ PKT_TX_OIP_TCP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
PKT_TX_OIP_IIP_CKSUM) {
/* Outer IP, Inner IP CSO */
@@ -214,11 +255,23 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
+ PKT_TX_TCP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
+ PKT_TX_UDP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
+ PKT_TX_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
+ PKT_TX_OUTER_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
txbd1->mss = 0;
@@ -234,14 +287,15 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
txbd = &txr->tx_desc_ring[txr->tx_prod];
- txbd->address = rte_cpu_to_le_32(rte_mbuf_data_iova(m_seg));
- txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
+ txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
+ txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
txbd->len = m_seg->data_len;
m_seg = m_seg->next;
}
txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
+ txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);
txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
@@ -278,35 +332,44 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
uint32_t raw_cons = cpr->cp_raw_cons;
uint32_t cons;
- int nb_tx_pkts = 0;
+ uint32_t nb_tx_pkts = 0;
struct tx_cmpl *txcmp;
+ struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
+ struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
+ uint32_t ring_mask = cp_ring_struct->ring_mask;
+ uint32_t opaque = 0;
- if ((txq->tx_ring->tx_ring_struct->ring_size -
- (bnxt_tx_avail(txq->tx_ring))) >
- txq->tx_free_thresh) {
- while (1) {
- cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
- txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
-
- if (!CMP_VALID(txcmp, raw_cons, cpr->cp_ring_struct))
- break;
- cpr->valid = FLIP_VALID(cons,
- cpr->cp_ring_struct->ring_mask,
- cpr->valid);
-
- if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
- nb_tx_pkts++;
- else
- RTE_LOG_DP(DEBUG, PMD,
- "Unhandled CMP type %02x\n",
- CMP_TYPE(txcmp));
- raw_cons = NEXT_RAW_CMP(raw_cons);
- }
- if (nb_tx_pkts)
- bnxt_tx_cmp(txq, nb_tx_pkts);
+ if (((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) &
+ txq->tx_ring->tx_ring_struct->ring_mask) < txq->tx_free_thresh)
+ return 0;
+
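+ /* Each Tx completion's opaque field carries the number of coalesced
+ * packets it acknowledges, so sum opaque rather than counting
+ * completion entries.
+ */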
+ do {
+ cons = RING_CMPL(ring_mask, raw_cons);
+ txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
+ rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) &
+ ring_mask]);
+
+ if (!CMPL_VALID(txcmp, cpr->valid))
+ break;
+ opaque = rte_cpu_to_le_32(txcmp->opaque);
+ NEXT_CMPL(cpr, cons, cpr->valid, 1);
+ rte_prefetch0(&cp_desc_ring[cons]);
+
+ if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
+ nb_tx_pkts += opaque;
+ else
+ RTE_LOG_DP(ERR, PMD,
+ "Unhandled CMP type %02x\n",
+ CMP_TYPE(txcmp));
+ raw_cons = cons;
+ } while (nb_tx_pkts < ring_mask);
+
+ if (nb_tx_pkts) {
+ bnxt_tx_cmp(txq, nb_tx_pkts);
cpr->cp_raw_cons = raw_cons;
- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ B_CP_DB(cpr, cpr->cp_raw_cons, ring_mask);
}
+
return nb_tx_pkts;
}
@@ -315,8 +378,8 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct bnxt_tx_queue *txq = tx_queue;
uint16_t nb_tx_pkts = 0;
- uint16_t db_mask = txq->tx_ring->tx_ring_struct->ring_size >> 2;
- uint16_t last_db_mask = 0;
+ uint16_t coal_pkts = 0;
+ uint16_t cmpl_next = txq->cmpl_next;
/* Handle TX completions */
bnxt_handle_tx_cp(txq);
@@ -326,16 +389,25 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
return 0;
}
+
+ txq->cmpl_next = 0;
/* Handle TX burst request */
for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
- if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) {
+ int rc;
+
+ /* Request a completion on first and last packet */
+ cmpl_next |= (nb_pkts == nb_tx_pkts + 1);
+ coal_pkts++;
+ rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
+ &coal_pkts, &cmpl_next);
+
+ if (unlikely(rc)) {
+ /* Request a completion in next cycle */
+ txq->cmpl_next = 1;
break;
- } else if ((nb_tx_pkts & db_mask) != last_db_mask) {
- B_TX_DB(txq->tx_ring->tx_doorbell,
- txq->tx_ring->tx_prod);
- last_db_mask = nb_tx_pkts & db_mask;
}
}
+
if (nb_tx_pkts)
B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod);
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 15c7e5a0..7f3c7cdb 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -45,10 +45,20 @@ int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)
#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)
diff --git a/drivers/net/bnxt/bnxt_util.c b/drivers/net/bnxt/bnxt_util.c
new file mode 100644
index 00000000..7d334271
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_util.c
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+
+#include "bnxt_util.h"
+
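+/* Return 1 if all bytes in the buffer are zero, 0 otherwise. */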
+int bnxt_check_zero_bytes(const uint8_t *bytes, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (bytes[i] != 0x00)
+ return 0;
+ return 1;
+}
diff --git a/drivers/net/bnxt/bnxt_util.h b/drivers/net/bnxt/bnxt_util.h
new file mode 100644
index 00000000..2378833c
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_util.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_UTIL_H_
+#define _BNXT_UTIL_H_
+
+int bnxt_check_zero_bytes(const uint8_t *bytes, int len);
+
+#endif /* _BNXT_UTIL_H_ */
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 19d06af5..c0577cd7 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -39,7 +39,7 @@ void bnxt_init_vnics(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
uint16_t max_vnics;
- int i, j;
+ int i;
max_vnics = bp->max_vnics;
STAILQ_INIT(&bp->free_vnic_list);
@@ -52,9 +52,6 @@ void bnxt_init_vnics(struct bnxt *bp)
vnic->hash_mode =
HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
- for (j = 0; j < MAX_QUEUES_PER_VNIC; j++)
- vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE;
-
prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
STAILQ_INIT(&vnic->filter);
STAILQ_INIT(&vnic->flow_list);
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index c521d7e5..9029f78c 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -15,13 +15,9 @@ struct bnxt_vnic_info {
uint16_t fw_vnic_id; /* returned by Chimp during alloc */
uint16_t rss_rule;
-#define MAX_NUM_TRAFFIC_CLASSES 8
-#define MAX_NUM_RSS_QUEUES_PER_VNIC 16
-#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \
- MAX_NUM_TRAFFIC_CLASSES)
uint16_t start_grp_id;
uint16_t end_grp_id;
- uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC];
+ uint16_t *fw_grp_ids;
uint16_t dflt_ring_grp;
uint16_t mru;
uint16_t hash_type;
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index fd6d8807..f5c7b422 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -686,8 +686,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MINOR 9
#define HWRM_VERSION_UPDATE 2
/* non-zero means beta version */
-#define HWRM_VERSION_RSVD 6
-#define HWRM_VERSION_STR "1.9.2.6"
+#define HWRM_VERSION_RSVD 9
+#define HWRM_VERSION_STR "1.9.2.9"
/****************
* hwrm_ver_get *
@@ -3183,6 +3183,9 @@ struct hwrm_async_event_cmpl {
/* LLFC/PFC Configuration Change */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE \
UINT32_C(0x34)
+ /* Default VNIC Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE \
+ UINT32_C(0x35)
/* HWRM Error */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \
UINT32_C(0xff)
@@ -3280,6 +3283,11 @@ struct hwrm_async_event_cmpl_link_status_change {
UINT32_C(0xffff0)
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT \
4
+ /* Indicates the physical function this event occurred on. */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK \
+ UINT32_C(0xff00000)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT \
+ 20
} __attribute__((packed));
/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */
@@ -4087,6 +4095,10 @@ struct hwrm_async_event_cmpl_vf_flr {
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK \
UINT32_C(0xffff)
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+ /* Indicates the physical function this event occurred on. */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK \
+ UINT32_C(0xff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT 16
} __attribute__((packed));
/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */
@@ -4354,6 +4366,88 @@ struct hwrm_async_event_cmpl_llfc_pfc_change {
5
} __attribute__((packed));
+/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_default_vnic_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* unused1 is 10 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK \
+ UINT32_C(0xffc0)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT \
+ 6
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Notification of a default vnic allocation or free */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION \
+ UINT32_C(0x35)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates default vnic configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK \
+ UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT \
+ 0
+ /*
+ * If this field is set to 1, then it indicates that
+ * a default VNIC has been allocated.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC \
+ UINT32_C(0x1)
+ /*
+ * If this field is set to 2, then it indicates that
+ * a default VNIC has been freed.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
+ /* Indicates the physical function this event occurred on. */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK \
+ UINT32_C(0x3fc)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT \
+ 2
+ /* Indicates the virtual function this event occurred on */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0x3fffc00)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT \
+ 10
+} __attribute__((packed));
+
/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
struct hwrm_async_event_cmpl_hwrm_error {
uint16_t type;
@@ -5197,6 +5291,21 @@ struct hwrm_func_qcaps_output {
#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED \
UINT32_C(0x10000)
/*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to adopt the VFs belonging
+ * to another PF.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADOPTED_PF_SUPPORTED \
+ UINT32_C(0x20000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to administer another PF.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED \
+ UINT32_C(0x40000)
+ /*
* This value is current MAC address configured for this
* function. A value of 00-00-00-00-00-00 indicates no
* MAC address is currently configured.
diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build
new file mode 100644
index 00000000..e130f271
--- /dev/null
+++ b/drivers/net/bnxt/meson.build
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+install_headers('rte_pmd_bnxt.h')
+version = 2
+sources = files('bnxt_cpr.c',
+ 'bnxt_ethdev.c',
+ 'bnxt_filter.c',
+ 'bnxt_flow.c',
+ 'bnxt_hwrm.c',
+ 'bnxt_irq.c',
+ 'bnxt_ring.c',
+ 'bnxt_rxq.c',
+ 'bnxt_rxr.c',
+ 'bnxt_stats.c',
+ 'bnxt_txq.c',
+ 'bnxt_txr.c',
+ 'bnxt_util.c',
+ 'bnxt_vnic.c',
+ 'rte_pmd_bnxt.c')
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index d558df8b..8bc04cfd 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -345,14 +345,6 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
internals->tx_queue_offload_capa &= dev_info.tx_queue_offload_capa;
internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;
- if (link_properties_valid(bonded_eth_dev,
- &slave_eth_dev->data->dev_link) != 0) {
- RTE_BOND_LOG(ERR, "Invalid link properties for slave %d"
- " in bonding mode %d", slave_port_id,
- internals->mode);
- return -1;
- }
-
/* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
* the power of 2, the lower one is GCD
*/
@@ -373,6 +365,13 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
return -1;
}
+ /* Add additional MAC addresses to the slave */
+ if (slave_add_mac_addresses(bonded_eth_dev, slave_port_id) != 0) {
+ RTE_BOND_LOG(ERR, "Failed to add mac address(es) to slave %hu",
+ slave_port_id);
+ return -1;
+ }
+
internals->slave_count++;
if (bonded_eth_dev->data->dev_started) {
@@ -387,7 +386,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
/* Add slave details to bonded device */
slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
- /* Update all slave devices MACs*/
+ /* Update all slave devices MACs */
mac_address_slaves_update(bonded_eth_dev);
/* Register link status change callback with bonded device pointer as
@@ -405,11 +404,6 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
!internals->user_defined_primary_port)
bond_ethdev_primary_set(internals,
slave_port_id);
-
- if (find_slave_by_id(internals->active_slaves,
- internals->active_slave_count,
- slave_port_id) == internals->active_slave_count)
- activate_slave(bonded_eth_dev, slave_port_id);
}
}
@@ -491,6 +485,9 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
rte_eth_dev_default_mac_addr_set(slave_port_id,
&(internals->slaves[slave_idx].persisted_mac_addr));
+ /* remove additional MAC addresses from the slave */
+ slave_remove_mac_addresses(bonded_eth_dev, slave_port_id);
+
/*
* Remove bond device flows from slave device.
* Note: don't restore flow isolate mode.
@@ -716,9 +713,21 @@ rte_eth_bond_mac_address_reset(uint16_t bonded_port_id)
internals->user_defined_mac = 0;
if (internals->slave_count > 0) {
+ int slave_port;
+ /* Find the slave entry that matches the primary port:
+ * slave_add() stores slaves in insertion (slave_count) order,
+ * so the primary port is not necessarily at index primary_port.
+ */
+ for (slave_port = 0; slave_port < internals->slave_count;
+ slave_port++) {
+ if (internals->slaves[slave_port].port_id ==
+ internals->primary_port)
+ break;
+ }
+
/* Set MAC Address of Bonded Device */
if (mac_address_set(bonded_eth_dev,
- &internals->slaves[internals->primary_port].persisted_mac_addr)
+ &internals->slaves[slave_port].persisted_mac_addr)
!= 0) {
RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
return -1;
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 02d94b1b..58f7377c 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -25,6 +25,7 @@
#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
+#define BOND_MAX_MAC_ADDRS 16
#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
@@ -1588,6 +1589,61 @@ mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
return 0;
}
+static const struct ether_addr null_mac_addr;
+
+/*
+ * Add additional MAC addresses to the slave
+ */
+int
+slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id)
+{
+ int i, ret;
+ struct ether_addr *mac_addr;
+
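+ /* Slot 0 holds the bonded device's primary MAC address, which is
+ * programmed separately, so start from slot 1.
+ */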
+ for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
+ mac_addr = &bonded_eth_dev->data->mac_addrs[i];
+ if (is_same_ether_addr(mac_addr, &null_mac_addr))
+ break;
+
+ ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
+ if (ret < 0) {
+ /* rollback */
+ for (i--; i > 0; i--)
+ rte_eth_dev_mac_addr_remove(slave_port_id,
+ &bonded_eth_dev->data->mac_addrs[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Remove additional MAC addresses from the slave
+ */
+int
+slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id)
+{
+ int i, rc, ret;
+ struct ether_addr *mac_addr;
+
+ rc = 0;
+ for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
+ mac_addr = &bonded_eth_dev->data->mac_addrs[i];
+ if (is_same_ether_addr(mac_addr, &null_mac_addr))
+ break;
+
+ ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
+ /* save only the first error */
+ if (ret < 0 && rc == 0)
+ rc = ret;
+ }
+
+ return rc;
+}
+
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
@@ -2057,10 +2113,6 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
}
}
- /* Update all slave devices MACs*/
- if (mac_address_slaves_update(eth_dev) != 0)
- goto out_err;
-
/* If bonded device is configured in promiscuous mode then re-apply config */
if (internals->promiscuous_en)
bond_ethdev_promiscuous_enable(eth_dev);
@@ -2101,6 +2153,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
(void *)&rte_eth_devices[internals->port_id]);
}
+ /* Update all slave devices MACs */
+ if (mac_address_slaves_update(eth_dev) != 0)
+ goto out_err;
+
if (internals->user_defined_primary_port)
bond_ethdev_primary_set(internals, internals->primary_port);
@@ -2173,7 +2229,6 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
tlb_last_obytets[internals->active_slaves[i]] = 0;
}
- internals->active_slave_count = 0;
internals->link_status_polling_enabled = 0;
for (i = 0; i < internals->slave_count; i++)
internals->slaves[i].last_link_status = 0;
@@ -2219,7 +2274,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
uint16_t max_nb_rx_queues = UINT16_MAX;
uint16_t max_nb_tx_queues = UINT16_MAX;
- dev_info->max_mac_addrs = 1;
+ dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
internals->candidate_max_rx_pktlen :
@@ -2664,10 +2719,8 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
rte_eth_link_get_nowait(port_id, &link);
if (link.link_status) {
- if (active_pos < internals->active_slave_count) {
- rte_spinlock_unlock(&internals->lsc_lock);
- return rc;
- }
+ if (active_pos < internals->active_slave_count)
+ goto link_update;
/* if no active slave ports then set this port to be primary port */
if (internals->active_slave_count < 1) {
@@ -2679,6 +2732,17 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
mac_address_slaves_update(bonded_eth_dev);
}
+ /* check link state properties if bonded link is up */
+ if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+ if (link_properties_valid(bonded_eth_dev, &link) != 0)
+ RTE_BOND_LOG(ERR, "Invalid link properties "
+ "for slave %d in bonding mode %d",
+ port_id, internals->mode);
+ } else {
+ /* inherit slave link properties */
+ link_properties_set(bonded_eth_dev, &link);
+ }
+
activate_slave(bonded_eth_dev, port_id);
/* If user has defined the primary port then default to using it */
@@ -2686,10 +2750,8 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
internals->primary_port == port_id)
bond_ethdev_primary_set(internals, port_id);
} else {
- if (active_pos == internals->active_slave_count) {
- rte_spinlock_unlock(&internals->lsc_lock);
- return rc;
- }
+ if (active_pos == internals->active_slave_count)
+ goto link_update;
/* Remove from active slave list */
deactivate_slave(bonded_eth_dev, port_id);
@@ -2708,6 +2770,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
}
}
+link_update:
/**
* Update bonded device link properties after any change to active
* slaves
@@ -2745,7 +2808,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
rte_spinlock_unlock(&internals->lsc_lock);
- return 0;
+ return rc;
}
static int
@@ -2905,6 +2968,68 @@ bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
return -ENOTSUP;
}
+static int
+bond_ethdev_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ __rte_unused uint32_t index, uint32_t vmdq)
+{
+ struct rte_eth_dev *slave_eth_dev;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int ret, i;
+
+ rte_spinlock_lock(&internals->lock);
+
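+ /* First pass: verify every slave supports the MAC add/remove ops so
+ * the address is never applied to only a subset of slaves.
+ */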
+ for (i = 0; i < internals->slave_count; i++) {
+ slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
+ if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
+ *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
+ ret = -ENOTSUP;
+ goto end;
+ }
+ }
+
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
+ mac_addr, vmdq);
+ if (ret < 0) {
+ /* rollback */
+ for (i--; i >= 0; i--)
+ rte_eth_dev_mac_addr_remove(
+ internals->slaves[i].port_id, mac_addr);
+ goto end;
+ }
+ }
+
+ ret = 0;
+end:
+ rte_spinlock_unlock(&internals->lock);
+ return ret;
+}
+
+static void
+bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct rte_eth_dev *slave_eth_dev;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+
+ rte_spinlock_lock(&internals->lock);
+
+ for (i = 0; i < internals->slave_count; i++) {
+ slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
+ if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
+ goto end;
+ }
+
+ struct ether_addr *mac_addr = &dev->data->mac_addrs[index];
+
+ for (i = 0; i < internals->slave_count; i++)
+ rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
+ mac_addr);
+
+end:
+ rte_spinlock_unlock(&internals->lock);
+}
+
const struct eth_dev_ops default_dev_ops = {
.dev_start = bond_ethdev_start,
.dev_stop = bond_ethdev_stop,
@@ -2927,6 +3052,8 @@ const struct eth_dev_ops default_dev_ops = {
.rss_hash_conf_get = bond_ethdev_rss_hash_conf_get,
.mtu_set = bond_ethdev_mtu_set,
.mac_addr_set = bond_ethdev_mac_address_set,
+ .mac_addr_add = bond_ethdev_mac_addr_add,
+ .mac_addr_remove = bond_ethdev_mac_addr_remove,
.filter_ctrl = bond_filter_ctrl
};
@@ -2954,10 +3081,13 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
eth_dev->data->nb_rx_queues = (uint16_t)1;
eth_dev->data->nb_tx_queues = (uint16_t)1;
- eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
- socket_id);
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN *
+ BOND_MAX_MAC_ADDRS, 0, socket_id);
if (eth_dev->data->mac_addrs == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
+ RTE_BOND_LOG(ERR,
+ "Failed to allocate %u bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
goto err;
}
@@ -3065,6 +3195,7 @@ bond_probe(struct rte_vdev_device *dev)
}
/* TODO: request info from primary to set up Rx and Tx */
eth_dev->dev_ops = &default_dev_ops;
+ eth_dev->device = &dev->device;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -3119,6 +3250,7 @@ bond_probe(struct rte_vdev_device *dev)
internals = rte_eth_devices[port_id].data->dev_private;
internals->kvlist = kvlist;
+ rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
if (rte_kvargs_process(kvlist,
@@ -3138,7 +3270,6 @@ bond_probe(struct rte_vdev_device *dev)
rte_eth_bond_8023ad_agg_selection_set(port_id, AGG_STABLE);
}
- rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
"socket %u.", name, port_id, bonding_mode, socket_id);
return 0;
@@ -3485,9 +3616,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
int bond_logtype;
-RTE_INIT(bond_init_log);
-static void
-bond_init_log(void)
+RTE_INIT(bond_init_log)
{
bond_logtype = rte_log_register("pmd.net.bon");
if (bond_logtype >= 0)
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index 65445b86..43e0e448 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -231,6 +231,14 @@ int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev);
int
+slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id);
+
+int
+slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id);
+
+int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode);
int
diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile
index 79fdb6f0..5d66c4b3 100644
--- a/drivers/net/cxgbe/Makefile
+++ b/drivers/net/cxgbe/Makefile
@@ -49,7 +49,10 @@ SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_main.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += clip_tbl.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4vf_hw.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index 55cb2e91..e98dd218 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -11,12 +11,16 @@
#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_io.h>
+#include <rte_rwlock.h>
+#include <rte_ethdev.h>
#include "cxgbe_compat.h"
#include "t4_regs_values.h"
+#include "cxgbe_ofld.h"
enum {
MAX_ETH_QSETS = 64, /* # of Ethernet Tx/Rx queue sets */
+ MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
};
struct adapter;
@@ -254,10 +258,20 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
unsigned int flags; /* flags for state of the queue */
} __rte_cache_aligned;
+struct sge_ctrl_txq { /* State for an SGE control Tx queue */
+ struct sge_txq q; /* txq */
+ struct adapter *adapter; /* adapter associated with this queue */
+ rte_spinlock_t ctrlq_lock; /* control queue lock */
+ u8 full; /* the Tx ring is full */
+ u64 txp; /* number of transmits */
+ struct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */
+} __rte_cache_aligned;
+
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_rspq fw_evtq __rte_cache_aligned;
+ struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
u16 max_ethqsets; /* # of available Ethernet queue sets */
u32 stat_len; /* length of status page at ring end */
@@ -306,9 +320,54 @@ struct adapter {
unsigned int vpd_flag;
int use_unpacked_mode; /* unpacked rx mode state */
+ rte_spinlock_t win0_lock;
+
+ unsigned int clipt_start; /* CLIP table start */
+ unsigned int clipt_end; /* CLIP table end */
+ struct clip_tbl *clipt; /* CLIP table */
+
+ struct tid_info tids; /* Info used to access TID related tables */
};
/**
+ * t4_os_rwlock_init - initialize rwlock
+ * @lock: the rwlock
+ */
+static inline void t4_os_rwlock_init(rte_rwlock_t *lock)
+{
+ rte_rwlock_init(lock);
+}
+
+/**
+ * t4_os_write_lock - get a write lock
+ * @lock: the rwlock
+ */
+static inline void t4_os_write_lock(rte_rwlock_t *lock)
+{
+ rte_rwlock_write_lock(lock);
+}
+
+/**
+ * t4_os_write_unlock - unlock a write lock
+ * @lock: the rwlock
+ */
+static inline void t4_os_write_unlock(rte_rwlock_t *lock)
+{
+ rte_rwlock_write_unlock(lock);
+}
+
+/**
+ * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev
+ * @dev: the rte_eth_dev
+ *
+ * Return the struct port_info associated with a rte_eth_dev
+ */
+static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev)
+{
+ return (struct port_info *)dev->data->dev_private;
+}
+
+/**
* adap2pinfo - return the port_info of a port
* @adap: the adapter
* @idx: the port index
@@ -320,6 +379,17 @@ static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx)
return adap->port[idx];
}
+/**
+ * ethdev2adap - return the adapter structure associated with a rte_eth_dev
+ * @dev: the rte_eth_dev
+ *
+ * Return the struct adapter associated with a rte_eth_dev
+ */
+static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev)
+{
+ return ethdev2pinfo(dev)->adapter;
+}
+
#define CXGBE_PCI_REG(reg) rte_read32(reg)
static inline uint64_t cxgbe_read_addr64(volatile void *addr)
@@ -680,6 +750,38 @@ static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
t4_os_unlock(lock);
}
+/**
+ * t4_init_completion - initialize completion
+ * @c: the completion context
+ */
+static inline void t4_init_completion(struct t4_completion *c)
+{
+ c->done = 0;
+ t4_os_lock_init(&c->lock);
+}
+
+/**
+ * t4_complete - set completion as done
+ * @c: the completion context
+ */
+static inline void t4_complete(struct t4_completion *c)
+{
+ t4_os_lock(&c->lock);
+ c->done = 1;
+ t4_os_unlock(&c->lock);
+}
+
+/**
+ * cxgbe_port_viid - get the VI id of a port
+ * @dev: the device for the port
+ *
+ * Return the VI id of the given port.
+ */
+static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev)
+{
+ return ethdev2pinfo(dev)->viid;
+}
+
void *t4_alloc_mem(size_t size);
void t4_free_mem(void *addr);
#define t4_os_alloc(_size) t4_alloc_mem((_size))
@@ -694,6 +796,7 @@ void t4_sge_tx_monitor_start(struct adapter *adap);
void t4_sge_tx_monitor_stop(struct adapter *adap);
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
uint16_t nb_pkts);
+int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_sge_init(struct adapter *adap);
@@ -701,6 +804,9 @@ int t4vf_sge_init(struct adapter *adap);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct rte_eth_dev *eth_dev, uint16_t queue_id,
unsigned int iqid, int socket_id);
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
struct rte_eth_dev *eth_dev, int intr_idx,
struct sge_fl *fl, rspq_handler_t handler,
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index 155a3028..157201da 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -18,6 +18,9 @@ extern "C" {
#define CXGBE_PAGE_SIZE RTE_PGSIZE_4K
+#define T4_MEMORY_WRITE 0
+#define T4_MEMORY_READ 1
+
enum {
MAX_NPORTS = 4, /* max # of ports */
};
@@ -47,6 +50,8 @@ enum cc_fec {
FEC_BASER_RS = 1 << 2, /* BaseR/Reed-Solomon */
};
+enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
+
struct port_stats {
u64 tx_octets; /* total # of octets in good frames */
u64 tx_frames; /* all good frames */
@@ -151,6 +156,9 @@ struct tp_params {
int vnic_shift;
int port_shift;
int protocol_shift;
+ int ethertype_shift;
+
+ u64 hash_filter_mask;
};
struct vpd_params {
@@ -203,6 +211,14 @@ struct rss_params {
};
/*
+ * Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int neq; /* N egress Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+};
+
+/*
* Maximum resources provisioned for a PCI VF.
*/
struct vf_resources {
@@ -225,6 +241,7 @@ struct adapter_params {
struct pci_params pci;
struct devlog_params devlog;
struct rss_params rss;
+ struct pf_resources pfres;
struct vf_resources vfres;
enum pcie_memwin drv_memwin;
@@ -246,6 +263,8 @@ struct adapter_params {
unsigned char nports; /* # of ethernet ports */
unsigned char portvec;
+ unsigned char hash_filter;
+
enum chip_type chip; /* chip code */
struct arch_specific_params arch; /* chip specific params */
@@ -309,6 +328,11 @@ static inline int is_pf4(struct adapter *adap)
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
+static inline int is_hashfilter(const struct adapter *adap)
+{
+ return adap->params.hash_filter;
+}
+
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val);
@@ -378,6 +402,8 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
@@ -449,6 +475,7 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int nregs, unsigned int start_idx);
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_pfres(struct adapter *adapter);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_flash_cfg_addr(struct adapter *adapter);
@@ -500,5 +527,15 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
int t4_seeprom_wp(struct adapter *adapter, int enable);
+int t4_memory_rw_addr(struct adapter *adap, int win,
+ u32 addr, u32 len, void *hbuf, int dir);
+int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
+ u32 len, void *hbuf, int dir);
+static inline int t4_memory_rw(struct adapter *adap, int win,
+ int mtype, u32 maddr, u32 len,
+ void *hbuf, int dir)
+{
+ return t4_memory_rw_mtype(adap, win, mtype, maddr, len, hbuf, dir);
+}
fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index e5ef73b6..31762c9c 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -2480,6 +2480,46 @@ int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
return 0;
}
+/**
+ * t4_get_pfres - retrieve PF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a physical
+ * function. The results are stored in @adapter->params.pfres.
+ */
+int t4_get_pfres(struct adapter *adapter)
+{
+ struct pf_resources *pfres = &adapter->params.pfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ u32 word;
+ int v;
+
+ /*
+ * Execute PFVF Read command to get PF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PFVF_CMD_PFN(adapter->pf) |
+ V_FW_PFVF_CMD_VFN(0));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Extract PF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ pfres->neq = G_FW_PFVF_CMD_NEQ(word);
+ return 0;
+}
+
/* serial flash and firmware constants and flash config file constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -4491,6 +4531,31 @@ static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
}
/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ctrl_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_EQ_CTRL_CMD_PFN(pf) |
+ V_FW_EQ_CTRL_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+ c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
* t4_handle_fw_rpl - process a FW reply message
* @adap: the adapter
* @rpl: start of the FW message
@@ -4616,9 +4681,8 @@ struct flash_desc {
int t4_get_flash_params(struct adapter *adapter)
{
/*
- * Table for non-Numonix supported flash parts. Numonix parts are left
- * to the preexisting well-tested code. All flash parts have 64KB
- * sectors.
+ * Table for non-standard supported Flash parts. Note, all Flash
+ * parts must have 64KB sectors.
*/
static struct flash_desc supported_flash[] = {
{ 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
@@ -4627,7 +4691,7 @@ int t4_get_flash_params(struct adapter *adapter)
int ret;
u32 flashid = 0;
unsigned int part, manufacturer;
- unsigned int density, size;
+ unsigned int density, size = 0;
/**
* Issue a Read ID Command to the Flash part. We decode supported
@@ -4642,6 +4706,9 @@ int t4_get_flash_params(struct adapter *adapter)
if (ret < 0)
return ret;
+ /**
+ * Check to see if it's one of our non-standard supported Flash parts.
+ */
for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
if (supported_flash[part].vendor_and_model_id == flashid) {
adapter->params.sf_size =
@@ -4652,6 +4719,15 @@ int t4_get_flash_params(struct adapter *adapter)
}
}
+ /**
+ * Decode Flash part size. The code below looks repetitive with
+ * common encodings, but that's not guaranteed in the JEDEC
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
manufacturer = flashid & 0xff;
switch (manufacturer) {
case 0x20: { /* Micron/Numonix */
@@ -4688,21 +4764,81 @@ int t4_get_flash_params(struct adapter *adapter)
case 0x22:
size = 1 << 28; /* 256MB */
break;
- default:
- dev_err(adapter, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
+ break;
+ }
- adapter->params.sf_size = size;
- adapter->params.sf_nsec = size / SF_SEC_SIZE;
+ case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
+ /**
+ * This Density -> Size decoding table is taken from ISSI
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x16:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x17:
+ size = 1 << 26; /* 64MB */
+ break;
+ }
break;
}
- default:
- dev_err(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
- return -EINVAL;
+
+ case 0xc2: { /* Macronix */
+ /**
+ * This Density -> Size decoding table is taken from Macronix
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
}
+ case 0xef: { /* Winbond */
+ /**
+ * This Density -> Size decoding table is taken from Winbond
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
+ }
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ dev_warn(adapter,
+ "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
+ }
+
+ /**
+ * Store decoded Flash size and fall through into vetting code.
+ */
+ adapter->params.sf_size = size;
+ adapter->params.sf_nsec = size / SF_SEC_SIZE;
+
found:
/*
* We should reject adapters with FLASHes which are too small. So, emit
@@ -5007,6 +5143,8 @@ int t4_init_tp_params(struct adapter *adap)
adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
F_PROTOCOL);
+ adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
+ F_ETHERTYPE);
/*
* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
@@ -5015,6 +5153,11 @@ int t4_init_tp_params(struct adapter *adap)
if ((adap->params.tp.ingress_config & F_VNIC) == 0)
adap->params.tp.vnic_shift = -1;
+ v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask = v;
+ v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask |= ((u64)v << 32);
+
return 0;
}
@@ -5190,3 +5333,212 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
}
return 0;
}
+
+/**
+ * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @addr: address within adapter memory
+ * @len: amount of memory to transfer
+ * @hbuf: host memory buffer
+ * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address and host buffer must be aligned on 32-bit
+ * boundaries; the length may be arbitrary.
+ *
+ * NOTES:
+ * 1. The memory is transferred as a raw byte sequence from/to the
+ * firmware's memory. If this memory contains data structures which
+ * contain multi-byte integers, it's the caller's responsibility to
+ * perform appropriate byte order conversions.
+ *
+ * 2. It is the Caller's responsibility to ensure that no other code
+ * uses the specified PCI-E Memory Window while this routine is
+ * using it. This is typically done via the use of OS-specific
+ * locks, etc.
+ */
+int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
+ u32 len, void *hbuf, int dir)
+{
+ u32 pos, offset, resid;
+ u32 win_pf, mem_reg, mem_aperture, mem_base;
+ u32 *buf;
+
+ /* Argument sanity checks ...*/
+ if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
+ return -EINVAL;
+ buf = (u32 *)hbuf;
+
+ /* It's convenient to be able to handle lengths which aren't a
+ * multiple of 32-bits because we often end up transferring files to
+ * the firmware. So we'll handle that by normalizing the length here
+ * and then handling any residual transfer at the end.
+ */
+ resid = len & 0x3;
+ len -= resid;
+
+ /* Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
+ */
+ mem_reg = t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
+ win));
+ mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
+ mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
+
+ win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);
+
+ /* Calculate our initial PCI-E Memory Window Position and Offset into
+ * that Window.
+ */
+ pos = addr & ~(mem_aperture - 1);
+ offset = addr - pos;
+
+ /* Set up initial PCI-E Memory Window to cover the start of our
+ * transfer. (Read it back to ensure that changes propagate before we
+ * attempt to use the new value.)
+ */
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
+ pos | win_pf);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));
+
+ /* Transfer data to/from the adapter as long as there's an integral
+ * number of 32-bit transfers to complete.
+ *
+ * A note on Endianness issues:
+ *
+ * The "register" reads and writes below from/to the PCI-E Memory
+ * Window invoke the standard adapter Big-Endian to PCI-E Link
+ * Little-Endian "swizzle." As a result, if we have the following
+ * data in adapter memory:
+ *
+ * Memory: ... | b0 | b1 | b2 | b3 | ...
+ * Address: i+0 i+1 i+2 i+3
+ *
+ * Then a read of the adapter memory via the PCI-E Memory Window
+ * will yield:
+ *
+ * x = readl(i)
+ * 31 0
+ * [ b3 | b2 | b1 | b0 ]
+ *
+ * If this value is stored into local memory on a Little-Endian system
+ * it will show up correctly in local memory as:
+ *
+ * ( ..., b0, b1, b2, b3, ... )
+ *
+ * But on a Big-Endian system, the store will show up in memory
+ * incorrectly swizzled as:
+ *
+ * ( ..., b3, b2, b1, b0, ... )
+ *
+ * So we need to account for this in the reads and writes to the
+ * PCI-E Memory Window below by undoing the register read/write
+ * swizzles.
+ */
+ while (len > 0) {
+ if (dir == T4_MEMORY_READ)
+ *buf++ = le32_to_cpu((__le32)t4_read_reg(adap,
+ mem_base +
+ offset));
+ else
+ t4_write_reg(adap, mem_base + offset,
+ (u32)cpu_to_le32(*buf++));
+ offset += sizeof(__be32);
+ len -= sizeof(__be32);
+
+ /* If we've reached the end of our current window aperture,
+ * move the PCI-E Memory Window on to the next. Note that
+ * doing this here after "len" may be 0 allows us to set up
+ * the PCI-E Memory Window for a possible final residual
+ * transfer below ...
+ */
+ if (offset == mem_aperture) {
+ pos += mem_aperture;
+ offset = 0;
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
+ win), pos | win_pf);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
+ win));
+ }
+ }
+
+ /* If the original transfer had a length which wasn't a multiple of
+ * 32-bits, now's where we need to finish off the transfer of the
+ * residual amount. The PCI-E Memory Window has already been moved
+ * above (if necessary) to cover this final transfer.
+ */
+ if (resid) {
+ union {
+ u32 word;
+ char byte[4];
+ } last;
+ unsigned char *bp;
+ int i;
+
+ if (dir == T4_MEMORY_READ) {
+ last.word = le32_to_cpu((__le32)t4_read_reg(adap,
+ mem_base +
+ offset));
+ for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
+ bp[i] = last.byte[i];
+ } else {
+ last.word = *buf;
+ for (i = resid; i < 4; i++)
+ last.byte[i] = 0;
+ t4_write_reg(adap, mem_base + offset,
+ (u32)cpu_to_le32(last.word));
+ }
+ }
+
+ return 0;
+}
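To make the window arithmetic and the endianness note in the routine above concrete, here is a minimal standalone sketch in C (MEM_APERTURE and the helper names are hypothetical, not driver code):

#include <stdint.h>

/* Hypothetical 64KB aperture, as would be programmed via the
 * PCI-E Memory Window BASE_WIN register.
 */
#define MEM_APERTURE (1u << 16)

/* Split an adapter address into window position and offset. */
static void window_split(uint32_t addr, uint32_t *pos, uint32_t *off)
{
	*pos = addr & ~(MEM_APERTURE - 1);	/* e.g. 0x12345 -> 0x10000 */
	*off = addr - *pos;			/* e.g. 0x12345 -> 0x02345 */
}

/* Undo the Big-Endian to Little-Endian link "swizzle" on a
 * Big-Endian host: a window read returns [b3 b2 b1 b0], so a
 * byte swap restores adapter memory order before storing.
 */
static uint32_t le32_to_host(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;	/* Little-Endian host: bytes already in order */
#endif
}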
+
+/**
+ * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @maddr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @hbuf: host memory buffer
+ * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
+ * provides a (memory type, address within memory type) interface.
+ */
+int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
+ u32 len, void *hbuf, int dir)
+{
+ u32 mtype_offset;
+ u32 edc_size, mc_size;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
+ * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
+ */
+ edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1) {
+ mtype_offset = (mtype * (edc_size * 1024 * 1024));
+ } else {
+ mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
+ A_MA_EXT_MEMORY0_BAR));
+ mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+ return t4_memory_rw_addr(adap, win,
+ mtype_offset + maddr, len,
+ hbuf, dir);
+}
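The mtype offset computation above can be sanity-checked with small numbers (the sizes are hypothetical; the MA BAR registers report EDRAM/MC sizes in MB):

/* Hypothetical sizes: edc_size = 128 (MB), mc_size = 1024 (MB)
 *   MEM_EDC0: mtype_offset = 0 * 128MB           = 0x00000000
 *   MEM_EDC1: mtype_offset = 1 * 128MB           = 0x08000000
 *   MEM_MC0 : mtype_offset = 2 * 128MB           = 0x10000000
 *   MEM_MC1 : mtype_offset = (2 * 128 + 1024)MB  = 0x50000000
 */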
diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h
index ac12afc0..e77563df 100644
--- a/drivers/net/cxgbe/base/t4_hw.h
+++ b/drivers/net/cxgbe/base/t4_hw.h
@@ -42,6 +42,10 @@ enum {
SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / SGE_EQ_IDXSIZE,
};
+enum {
+ TCB_SIZE = 128, /* TCB size */
+};
+
struct sge_qstat { /* data written to SGE queue status entries */
__be32 qid;
__be16 cidx;
diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h
index 74b4fc19..5d433c91 100644
--- a/drivers/net/cxgbe/base/t4_msg.h
+++ b/drivers/net/cxgbe/base/t4_msg.h
@@ -7,6 +7,15 @@
#define T4_MSG_H
enum {
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_TID_RELEASE = 0x1A,
+ CPL_ACT_OPEN_RPL = 0x25,
+ CPL_ABORT_RPL_RSS = 0x2D,
+ CPL_SET_TCB_RPL = 0x3A,
+ CPL_ACT_OPEN_REQ6 = 0x83,
CPL_SGE_EGR_UPDATE = 0xA5,
CPL_FW4_MSG = 0xC0,
CPL_FW6_MSG = 0xE0,
@@ -14,6 +23,20 @@ enum {
CPL_TX_PKT_XT = 0xEE,
};
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_FULL = 3,
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+};
+
enum { /* TX_PKT_XT checksum types */
TX_CSUM_TCPIP = 8,
TX_CSUM_UDPIP = 9,
@@ -25,6 +48,24 @@ union opcode_tid {
__u8 opcode;
};
+#define S_CPL_OPCODE 24
+#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+
+#define G_TID(x) ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_CPL_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(be32_to_cpu(OPCODE_TID(cmd))))
+
+/* partitioning of TID fields that also carry a queue id */
+#define S_TID_TID 0
+#define M_TID_TID 0x3fff
+#define G_TID_TID(x) (((x) >> S_TID_TID) & M_TID_TID)
+
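A short usage sketch for the opcode/TID helpers above (req and rpl are hypothetical CPL pointers, shown only to illustrate the packing):

/* Compose: opcode in bits 31:24, 24-bit tid in bits 23:0.
 *   req->ot.opcode_tid = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
 * Decompose on receive:
 *   tid  = GET_TID(rpl);       -- low 24 bits of the big-endian field
 *   qtid = G_TID_TID(tid);     -- low 14 bits, when a queue id is carried
 */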
struct rss_header {
__u8 opcode;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
@@ -66,6 +107,169 @@ struct work_request_hdr {
#define WR_HDR_SIZE 0
#endif
+#define S_COOKIE 5
+#define M_COOKIE 0x7
+#define V_COOKIE(x) ((x) << S_COOKIE)
+#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
+
+/* option 0 fields */
+#define S_TX_CHAN 2
+#define V_TX_CHAN(x) ((x) << S_TX_CHAN)
+
+#define S_DELACK 5
+#define V_DELACK(x) ((x) << S_DELACK)
+
+#define S_NON_OFFLOAD 7
+#define V_NON_OFFLOAD(x) ((x) << S_NON_OFFLOAD)
+#define F_NON_OFFLOAD V_NON_OFFLOAD(1U)
+
+#define S_ULP_MODE 8
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+
+#define S_SMAC_SEL 28
+#define V_SMAC_SEL(x) ((__u64)(x) << S_SMAC_SEL)
+
+#define S_TCAM_BYPASS 48
+#define V_TCAM_BYPASS(x) ((__u64)(x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS V_TCAM_BYPASS(1ULL)
+
+/* option 2 fields */
+#define S_RSS_QUEUE 0
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+
+#define S_RSS_QUEUE_VALID 10
+#define V_RSS_QUEUE_VALID(x) ((x) << S_RSS_QUEUE_VALID)
+#define F_RSS_QUEUE_VALID V_RSS_QUEUE_VALID(1U)
+
+#define S_CONG_CNTRL 14
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+
+#define S_RX_CHANNEL 26
+#define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
+#define F_RX_CHANNEL V_RX_CHANNEL(1U)
+
+#define S_CCTRL_ECN 27
+#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
+
+#define S_T5_OPT_2_VALID 31
+#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
+#define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U)
+
+struct cpl_t6_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+struct cpl_t6_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+#define S_FILTER_TUPLE 24
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+
+struct cpl_act_open_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 atid_status;
+};
+
+/* cpl_act_open_rpl.atid_status fields */
+#define S_AOPEN_STATUS 0
+#define M_AOPEN_STATUS 0xFF
+#define G_AOPEN_STATUS(x) (((x) >> S_AOPEN_STATUS) & M_AOPEN_STATUS)
+
+#define S_AOPEN_ATID 8
+#define M_AOPEN_ATID 0xFFFFFF
+#define G_AOPEN_ATID(x) (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID)
+
+struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 word_cookie;
+ __be64 mask;
+ __be64 val;
+};
+
+/* cpl_set_tcb_field.word_cookie fields */
+#define S_WORD 0
+#define V_WORD(x) ((x) << S_WORD)
+
+/* cpl_get_tcb.reply_ctrl fields */
+#define S_QUEUENO 0
+#define V_QUEUENO(x) ((x) << S_QUEUENO)
+
+#define S_REPLY_CHAN 14
+#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
+
+#define S_NO_REPLY 15
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+
+struct cpl_set_tcb_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __u8 cookie;
+ __u8 status;
+ __be64 oldval;
+};
+
+/* cpl_abort_req status command code
+ */
+struct cpl_abort_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_abort_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_tid_release {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
struct cpl_tx_data {
union opcode_tid ot;
__be32 len;
@@ -271,7 +475,13 @@ struct cpl_fw6_msg {
__be64 data[4];
};
+/* ULP_TX opcodes */
+enum {
+ ULP_TX_PKT = 4
+};
+
enum {
+ ULP_TX_SC_NOOP = 0x80,
ULP_TX_SC_IMM = 0x81,
ULP_TX_SC_DSGL = 0x82,
ULP_TX_SC_ISGL = 0x83
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index c0d6ddca..6f872edc 100644
--- a/drivers/net/cxgbe/base/t4_regs.h
+++ b/drivers/net/cxgbe/base/t4_regs.h
@@ -458,6 +458,7 @@
#define F_CRXPKTENC V_CRXPKTENC(1U)
#define TP_BASE_ADDR 0x7d00
+#define A_TP_CMM_TCB_BASE 0x7d10
#define A_TP_TIMER_RESOLUTION 0x7d90
@@ -574,6 +575,21 @@
#define S_RM_OVLAN 9
#define V_RM_OVLAN(x) ((x) << S_RM_OVLAN)
+/* registers for module MA */
+#define A_MA_EDRAM0_BAR 0x77c0
+
+#define S_EDRAM0_SIZE 0
+#define M_EDRAM0_SIZE 0xfffU
+#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE)
+#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE)
+
+#define A_MA_EXT_MEMORY0_BAR 0x77c8
+
+#define S_EXT_MEM0_SIZE 0
+#define M_EXT_MEM0_SIZE 0xfffU
+#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE)
+#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE)
+
/* registers for module MPS */
#define MPS_BASE_ADDR 0x9000
#define T4VF_MPS_BASE_ADDR 0x0100
@@ -783,8 +799,11 @@
#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80
#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
+#define A_MPS_VF_STAT_TX_VF_MCAST_BYTES_L 0x90
#define A_MPS_VF_STAT_TX_VF_MCAST_FRAMES_L 0x98
+#define A_MPS_VF_STAT_TX_VF_UCAST_BYTES_L 0xa0
#define A_MPS_VF_STAT_TX_VF_UCAST_FRAMES_L 0xa8
#define A_MPS_VF_STAT_TX_VF_DROP_FRAMES_L 0xb0
#define A_MPS_VF_STAT_RX_VF_BCAST_FRAMES_L 0xd0
@@ -921,3 +940,15 @@
#define M_REV 0xfU
#define V_REV(x) ((x) << S_REV)
#define G_REV(x) (((x) >> S_REV) & M_REV)
+
+/* registers for module LE */
+#define A_LE_DB_CONFIG 0x19c04
+
+#define S_HASHEN 20
+#define V_HASHEN(x) ((x) << S_HASHEN)
+#define F_HASHEN V_HASHEN(1U)
+
+#define A_LE_DB_TID_HASHBASE 0x19df8
+
+#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac
+#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0
diff --git a/drivers/net/cxgbe/base/t4_tcb.h b/drivers/net/cxgbe/base/t4_tcb.h
new file mode 100644
index 00000000..25435f9f
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4_tcb.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _T4_TCB_DEFS_H
+#define _T4_TCB_DEFS_H
+
+/* 105:96 */
+#define W_TCB_RSS_INFO 3
+#define S_TCB_RSS_INFO 0
+#define M_TCB_RSS_INFO 0x3ffULL
+#define V_TCB_RSS_INFO(x) ((x) << S_TCB_RSS_INFO)
+
+/* 191:160 */
+#define W_TCB_TIMESTAMP 5
+#define S_TCB_TIMESTAMP 0
+#define M_TCB_TIMESTAMP 0xffffffffULL
+#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
+
+/* 223:192 */
+#define S_TCB_T_RTT_TS_RECENT_AGE 0
+#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
+#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
+
+#endif /* _T4_TCB_DEFS_H */
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index 852e8f3c..e80b58a3 100644
--- a/drivers/net/cxgbe/base/t4fw_interface.h
+++ b/drivers/net/cxgbe/base/t4fw_interface.h
@@ -54,6 +54,9 @@ enum fw_memtype {
********************************/
enum fw_wr_opcodes {
+ FW_FILTER_WR = 0x02,
+ FW_ULPTX_WR = 0x04,
+ FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
FW_ETH_TX_PKTS_WR = 0x09,
FW_ETH_TX_PKT_VM_WR = 0x11,
@@ -76,6 +79,11 @@ struct fw_wr_hdr {
#define V_FW_WR_OP(x) ((x) << S_FW_WR_OP)
#define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP)
+/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER
+ */
+#define S_FW_WR_ATOMIC 23
+#define V_FW_WR_ATOMIC(x) ((x) << S_FW_WR_ATOMIC)
+
/* work request immediate data length (hi)
*/
#define S_FW_WR_IMMDLEN 0
@@ -92,6 +100,11 @@ struct fw_wr_hdr {
#define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ)
#define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U)
+/* flow context identifier (lo)
+ */
+#define S_FW_WR_FLOWID 8
+#define V_FW_WR_FLOWID(x) ((x) << S_FW_WR_FLOWID)
+
/* length in units of 16-bytes (lo)
*/
#define S_FW_WR_LEN16 0
@@ -143,6 +156,150 @@ struct fw_eth_tx_pkts_vm_wr {
__be16 vlantci;
};
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+ FW_FILTER_WR_SUCCESS,
+ FW_FILTER_WR_FLT_ADDED,
+ FW_FILTER_WR_FLT_DELETED,
+ FW_FILTER_WR_SMT_TBL_FULL,
+ FW_FILTER_WR_EINVAL,
+};
+
+struct fw_filter_wr {
+ __be32 op_pkd;
+ __be32 len16_pkd;
+ __be64 r3;
+ __be32 tid_to_iq;
+ __be32 del_filter_to_l2tix;
+ __be16 ethtype;
+ __be16 ethtypem;
+ __u8 frag_to_ovlan_vldm;
+ __u8 smac_sel;
+ __be16 rx_chan_rx_rpl_iq;
+ __be32 maci_to_matchtypem;
+ __u8 ptcl;
+ __u8 ptclm;
+ __u8 ttyp;
+ __u8 ttypm;
+ __be16 ivlan;
+ __be16 ivlanm;
+ __be16 ovlan;
+ __be16 ovlanm;
+ __u8 lip[16];
+ __u8 lipm[16];
+ __u8 fip[16];
+ __u8 fipm[16];
+ __be16 lp;
+ __be16 lpm;
+ __be16 fp;
+ __be16 fpm;
+ __be16 r7;
+ __u8 sma[6];
+};
+
+#define S_FW_FILTER_WR_TID 12
+#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
+
+#define S_FW_FILTER_WR_RQTYPE 11
+#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
+
+#define S_FW_FILTER_WR_NOREPLY 10
+#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
+
+#define S_FW_FILTER_WR_IQ 0
+#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
+
+#define S_FW_FILTER_WR_DEL_FILTER 31
+#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
+#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
+
+#define S_FW_FILTER_WR_RPTTID 25
+#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
+
+#define S_FW_FILTER_WR_DROP 24
+#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
+
+#define S_FW_FILTER_WR_DIRSTEER 23
+#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
+
+#define S_FW_FILTER_WR_MASKHASH 22
+#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
+
+#define S_FW_FILTER_WR_DIRSTEERHASH 21
+#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
+
+#define S_FW_FILTER_WR_LPBK 20
+#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
+
+#define S_FW_FILTER_WR_DMAC 19
+#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
+
+#define S_FW_FILTER_WR_INSVLAN 17
+#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
+
+#define S_FW_FILTER_WR_RMVLAN 16
+#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
+
+#define S_FW_FILTER_WR_HITCNTS 15
+#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
+
+#define S_FW_FILTER_WR_TXCHAN 13
+#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
+
+#define S_FW_FILTER_WR_PRIO 12
+#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
+
+#define S_FW_FILTER_WR_L2TIX 0
+#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
+
+#define S_FW_FILTER_WR_FRAG 7
+#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
+
+#define S_FW_FILTER_WR_FRAGM 6
+#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
+
+#define S_FW_FILTER_WR_IVLAN_VLD 5
+#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
+
+#define S_FW_FILTER_WR_OVLAN_VLD 4
+#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
+
+#define S_FW_FILTER_WR_IVLAN_VLDM 3
+#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
+
+#define S_FW_FILTER_WR_OVLAN_VLDM 2
+#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
+
+#define S_FW_FILTER_WR_RX_CHAN 15
+#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
+
+#define S_FW_FILTER_WR_RX_RPL_IQ 0
+#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
+
+#define S_FW_FILTER_WR_MACI 23
+#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
+
+#define S_FW_FILTER_WR_MACIM 14
+#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
+
+#define S_FW_FILTER_WR_FCOE 13
+#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
+
+#define S_FW_FILTER_WR_FCOEM 12
+#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
+
+#define S_FW_FILTER_WR_PORT 9
+#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
+
+#define S_FW_FILTER_WR_PORTM 6
+#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
+
+#define S_FW_FILTER_WR_MATCHTYPE 3
+#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
+
+#define S_FW_FILTER_WR_MATCHTYPEM 0
+#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
+
/******************************************************************************
* C O M M A N D s
*********************/
@@ -178,6 +335,7 @@ enum fw_cmd_opcodes {
FW_PFVF_CMD = 0x09,
FW_IQ_CMD = 0x10,
FW_EQ_ETH_CMD = 0x12,
+ FW_EQ_CTRL_CMD = 0x13,
FW_VI_CMD = 0x14,
FW_VI_MAC_CMD = 0x15,
FW_VI_RXMODE_CMD = 0x16,
@@ -187,6 +345,7 @@ enum fw_cmd_opcodes {
FW_RSS_IND_TBL_CMD = 0x20,
FW_RSS_GLB_CONFIG_CMD = 0x22,
FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_CLIP_CMD = 0x28,
FW_DEBUG_CMD = 0x81,
};
@@ -489,6 +648,10 @@ enum fw_params_mnem {
enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
+ FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs
+ * allocated by the device's
+ * Lookup Engine
+ */
FW_PARAMS_PARAM_DEV_FWREV = 0x0B, /* fw version */
FW_PARAMS_PARAM_DEV_TPREV = 0x0C, /* tp version */
FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
@@ -498,6 +661,10 @@ enum fw_params_param_dev {
* physical and virtual function parameters
*/
enum fw_params_param_pfvf {
+ FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
+ FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
+ FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
+ FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A
};
@@ -577,6 +744,12 @@ struct fw_pfvf_cmd {
__be32 r4;
};
+#define S_FW_PFVF_CMD_PFN 8
+#define V_FW_PFVF_CMD_PFN(x) ((x) << S_FW_PFVF_CMD_PFN)
+
+#define S_FW_PFVF_CMD_VFN 0
+#define V_FW_PFVF_CMD_VFN(x) ((x) << S_FW_PFVF_CMD_VFN)
+
#define S_FW_PFVF_CMD_NIQFLINT 20
#define M_FW_PFVF_CMD_NIQFLINT 0xfff
#define G_FW_PFVF_CMD_NIQFLINT(x) \
@@ -636,6 +809,11 @@ enum fw_iq_type {
FW_IQ_TYPE_FL_INT_CAP,
};
+enum fw_iq_iqtype {
+ FW_IQ_IQTYPE_NIC = 1,
+ FW_IQ_IQTYPE_OFLD,
+};
+
struct fw_iq_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
@@ -769,6 +947,9 @@ struct fw_iq_cmd {
(((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN)
#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U)
+#define S_FW_IQ_CMD_IQTYPE 24
+#define V_FW_IQ_CMD_IQTYPE(x) ((x) << S_FW_IQ_CMD_IQTYPE)
+
#define S_FW_IQ_CMD_FL0CNGCHMAP 20
#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf
#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP)
@@ -954,6 +1135,75 @@ struct fw_eq_eth_cmd {
#define G_FW_EQ_ETH_CMD_VIID(x) \
(((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID)
+struct fw_eq_ctrl_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 cmpliqid_eqid;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define S_FW_EQ_CTRL_CMD_PFN 8
+#define V_FW_EQ_CTRL_CMD_PFN(x) ((x) << S_FW_EQ_CTRL_CMD_PFN)
+
+#define S_FW_EQ_CTRL_CMD_VFN 0
+#define V_FW_EQ_CTRL_CMD_VFN(x) ((x) << S_FW_EQ_CTRL_CMD_VFN)
+
+#define S_FW_EQ_CTRL_CMD_ALLOC 31
+#define V_FW_EQ_CTRL_CMD_ALLOC(x) ((x) << S_FW_EQ_CTRL_CMD_ALLOC)
+#define F_FW_EQ_CTRL_CMD_ALLOC V_FW_EQ_CTRL_CMD_ALLOC(1U)
+
+#define S_FW_EQ_CTRL_CMD_FREE 30
+#define V_FW_EQ_CTRL_CMD_FREE(x) ((x) << S_FW_EQ_CTRL_CMD_FREE)
+#define F_FW_EQ_CTRL_CMD_FREE V_FW_EQ_CTRL_CMD_FREE(1U)
+
+#define S_FW_EQ_CTRL_CMD_EQSTART 28
+#define V_FW_EQ_CTRL_CMD_EQSTART(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTART)
+#define F_FW_EQ_CTRL_CMD_EQSTART V_FW_EQ_CTRL_CMD_EQSTART(1U)
+
+#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
+#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
+
+#define S_FW_EQ_CTRL_CMD_EQID 0
+#define M_FW_EQ_CTRL_CMD_EQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_EQID(x) ((x) << S_FW_EQ_CTRL_CMD_EQID)
+#define G_FW_EQ_CTRL_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID)
+
+#define S_FW_EQ_CTRL_CMD_PHYSEQID 0
+#define M_FW_EQ_CTRL_CMD_PHYSEQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_PHYSEQID(x) ((x) << S_FW_EQ_CTRL_CMD_PHYSEQID)
+#define G_FW_EQ_CTRL_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID)
+
+#define S_FW_EQ_CTRL_CMD_FETCHRO 22
+#define V_FW_EQ_CTRL_CMD_FETCHRO(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHRO)
+#define F_FW_EQ_CTRL_CMD_FETCHRO V_FW_EQ_CTRL_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_CTRL_CMD_HOSTFCMODE 20
+#define M_FW_EQ_CTRL_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_CTRL_CMD_PCIECHN 16
+#define V_FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << S_FW_EQ_CTRL_CMD_PCIECHN)
+
+#define S_FW_EQ_CTRL_CMD_IQID 0
+#define V_FW_EQ_CTRL_CMD_IQID(x) ((x) << S_FW_EQ_CTRL_CMD_IQID)
+
+#define S_FW_EQ_CTRL_CMD_FBMIN 23
+#define V_FW_EQ_CTRL_CMD_FBMIN(x) ((x) << S_FW_EQ_CTRL_CMD_FBMIN)
+
+#define S_FW_EQ_CTRL_CMD_FBMAX 20
+#define V_FW_EQ_CTRL_CMD_FBMAX(x) ((x) << S_FW_EQ_CTRL_CMD_FBMAX)
+
+#define S_FW_EQ_CTRL_CMD_CIDXFTHRESH 16
+#define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_CTRL_CMD_EQSIZE 0
+#define V_FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << S_FW_EQ_CTRL_CMD_EQSIZE)
+
enum fw_vi_func {
FW_VI_FUNC_ETH,
};
@@ -1946,6 +2196,22 @@ struct fw_rss_vi_config_cmd {
(((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN)
#define F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U)
+struct fw_clip_cmd {
+ __be32 op_to_write;
+ __be32 alloc_to_len16;
+ __be64 ip_hi;
+ __be64 ip_lo;
+ __be32 r4[2];
+};
+
+#define S_FW_CLIP_CMD_ALLOC 31
+#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC)
+#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U)
+
+#define S_FW_CLIP_CMD_FREE 30
+#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE)
+#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U)
+
/******************************************************************************
* D E B U G C O M M A N D s
******************************************************/
diff --git a/drivers/net/cxgbe/base/t4vf_hw.c b/drivers/net/cxgbe/base/t4vf_hw.c
index 9fd0b879..d96456bb 100644
--- a/drivers/net/cxgbe/base/t4vf_hw.c
+++ b/drivers/net/cxgbe/base/t4vf_hw.c
@@ -683,6 +683,9 @@ static int t4vf_get_port_stats_fw(struct adapter *adapter, int pidx,
/*
* Translate firmware statistics into host native statistics.
*/
+ p->tx_octets = be64_to_cpu(fwstats.tx_bcast_bytes) +
+ be64_to_cpu(fwstats.tx_mcast_bytes) +
+ be64_to_cpu(fwstats.tx_ucast_bytes);
p->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
p->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
p->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
@@ -722,6 +725,9 @@ void t4vf_get_port_stats(struct adapter *adapter, int pidx,
#define GET_STAT(name) \
t4_read_reg64(adapter, \
T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_##name##_L)
+ p->tx_octets = GET_STAT(TX_VF_BCAST_BYTES) +
+ GET_STAT(TX_VF_MCAST_BYTES) +
+ GET_STAT(TX_VF_UCAST_BYTES);
p->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
p->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
p->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
diff --git a/drivers/net/cxgbe/clip_tbl.c b/drivers/net/cxgbe/clip_tbl.c
new file mode 100644
index 00000000..5e4dc527
--- /dev/null
+++ b/drivers/net/cxgbe/clip_tbl.c
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include "common.h"
+#include "clip_tbl.h"
+
+/**
+ * Allocate a CLIP entry in HW with the associated IPV4/IPV6 address
+ */
+static int clip6_get_mbox(const struct rte_eth_dev *dev, const u32 *lip)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct fw_clip_cmd c;
+ u64 hi = ((u64)lip[1]) << 32 | lip[0];
+ u64 lo = ((u64)lip[3]) << 32 | lip[2];
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
+ c.ip_hi = hi;
+ c.ip_lo = lo;
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+/**
+ * Delete the CLIP entry in HW having the associated IPV4/IPV6 address
+ */
+static int clip6_release_mbox(const struct rte_eth_dev *dev, const u32 *lip)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct fw_clip_cmd c;
+ u64 hi = ((u64)lip[1]) << 32 | lip[0];
+ u64 lo = ((u64)lip[3]) << 32 | lip[2];
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
+ c.ip_hi = hi;
+ c.ip_lo = lo;
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+/**
+ * cxgbe_clip_release - Release associated CLIP entry
+ * @ce: clip entry to release
+ *
+ * Releases the ref count and, on the last reference, frees the CLIP entry
+ * from the CLIP table
+ */
+void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
+{
+ int ret;
+
+ t4_os_lock(&ce->lock);
+ if (rte_atomic32_dec_and_test(&ce->refcnt)) {
+ ret = clip6_release_mbox(dev, ce->addr);
+ if (ret)
+ dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
+ }
+ t4_os_unlock(&ce->lock);
+}
+
+/**
+ * find_or_alloc_clipe - Find/Allocate a free CLIP entry
+ * @c: CLIP table
+ * @lip: IPV4/IPV6 address to compare/add
+ * Returns pointer to the IPV4/IPV6 entry found/created
+ *
+ * Finds/Allocates a CLIP entry to be used for a filter rule.
+ */
+static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
+ const u32 *lip)
+{
+ struct clip_entry *end, *e;
+ struct clip_entry *first_free = NULL;
+ unsigned int clipt_size = c->clipt_size;
+
+ for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
+ if (rte_atomic32_read(&e->refcnt) == 0) {
+ if (!first_free)
+ first_free = e;
+ } else {
+ if (memcmp(lip, e->addr, sizeof(e->addr)) == 0)
+ goto exists;
+ }
+ }
+
+ if (first_free) {
+ e = first_free;
+ goto exists;
+ }
+
+ return NULL;
+
+exists:
+ return e;
+}
+
+static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
+ u32 *lip, u8 v6)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct clip_tbl *ctbl = adap->clipt;
+ struct clip_entry *ce;
+ int ret = 0;
+
+ if (!ctbl)
+ return NULL;
+
+ t4_os_write_lock(&ctbl->lock);
+ ce = find_or_alloc_clipe(ctbl, lip);
+ if (ce) {
+ t4_os_lock(&ce->lock);
+ if (!rte_atomic32_read(&ce->refcnt)) {
+ rte_memcpy(ce->addr, lip, sizeof(ce->addr));
+ if (v6) {
+ ce->type = FILTER_TYPE_IPV6;
+ rte_atomic32_set(&ce->refcnt, 1);
+ ret = clip6_get_mbox(dev, lip);
+ if (ret)
+ dev_debug(adap,
+ "CLIP FW ADD CMD failed: %d",
+ ret);
+ } else {
+ ce->type = FILTER_TYPE_IPV4;
+ }
+ } else {
+ rte_atomic32_inc(&ce->refcnt);
+ }
+ t4_os_unlock(&ce->lock);
+ }
+ t4_os_write_unlock(&ctbl->lock);
+
+ return ret ? NULL : ce;
+}
+
+/**
+ * cxgbe_clip_alloc - Allocate an IPV6 CLIP entry
+ * @dev: rte_eth_dev pointer
+ * @lip: IPV6 address to add
+ * Returns pointer to the CLIP entry created
+ *
+ * Allocates an IPV6 CLIP entry to be used for a filter rule.
+ */
+struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip)
+{
+ return t4_clip_alloc(dev, lip, FILTER_TYPE_IPV6);
+}
+
+/**
+ * Initialize CLIP Table
+ */
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end)
+{
+ unsigned int clipt_size;
+ struct clip_tbl *ctbl;
+ unsigned int i;
+
+ if (clipt_start >= clipt_end)
+ return NULL;
+
+ clipt_size = clipt_end - clipt_start + 1;
+
+ ctbl = t4_os_alloc(sizeof(*ctbl) +
+ clipt_size * sizeof(struct clip_entry));
+ if (!ctbl)
+ return NULL;
+
+ ctbl->clipt_start = clipt_start;
+ ctbl->clipt_size = clipt_size;
+
+ t4_os_rwlock_init(&ctbl->lock);
+
+ for (i = 0; i < ctbl->clipt_size; i++) {
+ t4_os_lock_init(&ctbl->cl_list[i].lock);
+ rte_atomic32_set(&ctbl->cl_list[i].refcnt, 0);
+ }
+
+ return ctbl;
+}
+
+/**
+ * Cleanup CLIP Table
+ */
+void t4_cleanup_clip_tbl(struct adapter *adap)
+{
+ if (adap->clipt)
+ t4_os_free(adap->clipt);
+}
diff --git a/drivers/net/cxgbe/clip_tbl.h b/drivers/net/cxgbe/clip_tbl.h
new file mode 100644
index 00000000..737ccc69
--- /dev/null
+++ b/drivers/net/cxgbe/clip_tbl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_CLIP_H_
+#define _CXGBE_CLIP_H_
+
+/*
+ * State for the corresponding entry of the HW CLIP table.
+ */
+struct clip_entry {
+ enum filter_type type; /* entry type */
+ u32 addr[4]; /* IPV4 or IPV6 address */
+ rte_spinlock_t lock; /* entry lock */
+ rte_atomic32_t refcnt; /* entry reference count */
+};
+
+struct clip_tbl {
+ unsigned int clipt_start; /* start index of CLIP table */
+ unsigned int clipt_size; /* size of CLIP table */
+ rte_rwlock_t lock; /* table rw lock */
+ struct clip_entry cl_list[0]; /* MUST BE LAST */
+};
+
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end);
+void t4_cleanup_clip_tbl(struct adapter *adap);
+struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip);
+void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce);
+#endif /* _CXGBE_CLIP_H_ */
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index e4a52560..5e6f5c98 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -18,12 +18,21 @@
#define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */
#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */
+/* Max poll time is 100 * 100msec = 10 sec */
+#define CXGBE_LINK_STATUS_POLL_MS 100 /* 100ms */
+#define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
+
#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */
-#define CXGBE_RSS_HF_ALL (ETH_RSS_IPV4 | ETH_RSS_IPV6 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP)
+#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_OTHER | \
+ ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan"
#define CXGBE_DEVARG_FORCE_LINK_UP "force_link_up"
@@ -32,16 +41,20 @@ bool force_linkup(struct adapter *adap);
int cxgbe_probe(struct adapter *adapter);
int cxgbevf_probe(struct adapter *adapter);
void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps);
+int cxgbe_set_link_status(struct port_info *pi, bool status);
int cxgbe_up(struct adapter *adap);
int cxgbe_down(struct port_info *pi);
void cxgbe_close(struct adapter *adapter);
void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats);
void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats);
void cxgbe_stats_reset(struct port_info *pi);
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt, struct t4_completion *c);
int link_start(struct port_info *pi);
void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,
unsigned int cnt, unsigned int size, unsigned int iqe_size);
int setup_sge_fwevtq(struct adapter *adapter);
+int setup_sge_ctrl_txq(struct adapter *adapter);
void cfg_queues(struct rte_eth_dev *eth_dev);
int cfg_queue_count(struct rte_eth_dev *eth_dev);
int init_rss(struct adapter *adap);
@@ -50,5 +63,6 @@ void cxgbe_enable_rx_queues(struct port_info *pi);
void print_port_info(struct adapter *adap);
void print_adapter_info(struct adapter *adap);
int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key);
+void configure_max_ethqsets(struct adapter *adapter);
#endif /* _CXGBE_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h
index 779bcf16..5d47c5f3 100644
--- a/drivers/net/cxgbe/cxgbe_compat.h
+++ b/drivers/net/cxgbe/cxgbe_compat.h
@@ -198,15 +198,6 @@ static inline int cxgbe_fls(int x)
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}
-/**
- * cxgbe_ffs - find first bit set
- * @x: the word to search
- */
-static inline int cxgbe_ffs(int x)
-{
- return x ? __builtin_ffs(x) : 0;
-}
-
static inline unsigned long ilog2(unsigned long n)
{
unsigned int e = 0;
@@ -250,4 +241,16 @@ static inline void writel_relaxed(unsigned int val, volatile void __iomem *addr)
rte_write32_relaxed(val, addr);
}
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)( \
+{ \
+ typeof(x) quot = (x) / (denom); \
+ typeof(x) rem = (x) % (denom); \
+ (quot * (numer)) + ((rem * (numer)) / (denom)); \
+} \
+)
+
#endif /* _CXGBE_COMPAT_H_ */
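A quick check that mult_frac() avoids the intermediate overflow a naive (x * numer) / denom would hit, using hypothetical 32-bit values:

/* x = 3,000,000,000 (u32), numer = 3, denom = 4:
 *   naive:     x * 3 = 9,000,000,000 overflows 32 bits before the divide
 *   mult_frac: quot = x / 4 = 750,000,000, rem = x % 4 = 0
 *              750,000,000 * 3 + (0 * 3) / 4 = 2,250,000,000 -- exact
 */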
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 61115e26..4dcad7a2 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -36,6 +36,7 @@
#include "cxgbe.h"
#include "cxgbe_pfvf.h"
+#include "cxgbe_flow.h"
/*
* Macros needed to support the PCI Device ID Table ...
@@ -199,24 +200,89 @@ void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
}
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
- __rte_unused int wait_to_complete)
+ int wait_to_complete)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
struct sge *s = &adapter->sge;
- struct rte_eth_link new_link;
- unsigned int work_done, budget = 4;
+ struct rte_eth_link new_link = { 0 };
+ unsigned int i, work_done, budget = 32;
+ u8 old_link = pi->link_cfg.link_ok;
- cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+ for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* Exit if link status changed or always forced up */
+ if (pi->link_cfg.link_ok != old_link || force_linkup(adapter))
+ break;
+
+ if (!wait_to_complete)
+ break;
+
+ rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
+ }
new_link.link_status = force_linkup(adapter) ?
ETH_LINK_UP : pi->link_cfg.link_ok;
+ new_link.link_autoneg = pi->link_cfg.autoneg;
new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
new_link.link_speed = pi->link_cfg.speed;
return rte_eth_linkstatus_set(eth_dev, &new_link);
}
+/**
+ * Set device link up.
+ */
+int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ unsigned int work_done, budget = 32;
+ struct sge *s = &adapter->sge;
+ int ret;
+
+ /* Flush all link events */
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* If link already up, nothing to do */
+ if (pi->link_cfg.link_ok)
+ return 0;
+
+ ret = cxgbe_set_link_status(pi, true);
+ if (ret)
+ return ret;
+
+ cxgbe_dev_link_update(dev, 1);
+ return 0;
+}
+
+/**
+ * Set device link down.
+ */
+int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ unsigned int work_done, budget = 32;
+ struct sge *s = &adapter->sge;
+ int ret;
+
+ /* Flush all link events */
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* If link already down, nothing to do */
+ if (!pi->link_cfg.link_ok)
+ return 0;
+
+ ret = cxgbe_set_link_status(pi, false);
+ if (ret)
+ return ret;
+
+ cxgbe_dev_link_update(dev, 0);
+ return 0;
+}
+
int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -352,7 +418,11 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
CXGBE_FUNC_TRACE();
configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
- if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+
+ /* The KEEP_CRC offload flag is not supported by the PMD;
+ * this block can be removed once DEV_RX_OFFLOAD_CRC_STRIP is removed
+ */
+ if (rte_eth_dev_must_keep_crc(configured_offloads)) {
dev_info(adapter, "can't disable hw crc strip\n");
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_CRC_STRIP;
@@ -363,6 +433,11 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
if (err)
return err;
adapter->flags |= FW_QUEUE_BOUND;
+ if (is_pf4(adapter)) {
+ err = setup_sge_ctrl_txq(adapter);
+ if (err)
+ return err;
+ }
}
err = cfg_queue_count(eth_dev);
@@ -794,13 +869,13 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
return err;
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
- rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
- rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
}
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
- rss_hf |= ETH_RSS_IPV6;
+ rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
@@ -809,7 +884,7 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
}
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
- rss_hf |= ETH_RSS_IPV4;
+ rss_hf |= CXGBE_RSS_HF_IPV4_MASK;
rss_conf->rss_hf = rss_hf;
@@ -1026,6 +1101,8 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.dev_infos_get = cxgbe_dev_info_get,
.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
.link_update = cxgbe_dev_link_update,
+ .dev_set_link_up = cxgbe_dev_set_link_up,
+ .dev_set_link_down = cxgbe_dev_set_link_down,
.mtu_set = cxgbe_dev_mtu_set,
.tx_queue_setup = cxgbe_dev_tx_queue_setup,
.tx_queue_start = cxgbe_dev_tx_queue_start,
@@ -1035,6 +1112,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.rx_queue_start = cxgbe_dev_rx_queue_start,
.rx_queue_stop = cxgbe_dev_rx_queue_stop,
.rx_queue_release = cxgbe_dev_rx_queue_release,
+ .filter_ctrl = cxgbe_dev_filter_ctrl,
.stats_get = cxgbe_dev_stats_get,
.stats_reset = cxgbe_dev_stats_reset,
.flow_ctrl_get = cxgbe_flow_ctrl_get,
diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
new file mode 100644
index 00000000..7f0d3800
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -0,0 +1,1252 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#include <rte_net.h>
+#include "common.h"
+#include "t4_tcb.h"
+#include "t4_regs.h"
+#include "cxgbe_filter.h"
+#include "clip_tbl.h"
+
+/**
+ * Initialize Hash Filters
+ */
+int init_hash_filter(struct adapter *adap)
+{
+ unsigned int n_user_filters;
+ unsigned int user_filter_perc;
+ int ret;
+ u32 params[7], val[7];
+
+#define FW_PARAM_DEV(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+ V_FW_PARAMS_PARAM_Y(0) | \
+ V_FW_PARAMS_PARAM_Z(0))
+
+ params[0] = FW_PARAM_DEV(NTID);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ params, val);
+ if (ret < 0)
+ return ret;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+
+ user_filter_perc = 100;
+ n_user_filters = mult_frac(adap->tids.nftids,
+ user_filter_perc,
+ 100);
+
+ adap->tids.nftids = n_user_filters;
+ adap->params.hash_filter = 1;
+ return 0;
+}
+
+/**
+ * Validate if the requested filter specification can be set by checking
+ * if the requested features have been enabled
+ */
+int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
+{
+ u32 fconf;
+
+ /*
+ * Check for unconfigured fields being used.
+ */
+ fconf = adapter->params.tp.vlan_pri_map;
+
+#define S(_field) \
+ (fs->val._field || fs->mask._field)
+#define U(_mask, _field) \
+ (!(fconf & (_mask)) && S(_field))
+
+ if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
+ return -EOPNOTSUPP;
+
+#undef S
+#undef U
+
+ /*
+ * If the user is requesting that the filter action loop
+ * matching packets back out one of our ports, make sure that
+ * the egress port is in range.
+ */
+ if (fs->action == FILTER_SWITCH &&
+ fs->eport >= adapter->params.nports)
+ return -ERANGE;
+
+ /*
+ * Don't allow various trivially obvious bogus out-of-range
+ * values ...
+ */
+ if (fs->val.iport >= adapter->params.nports)
+ return -ERANGE;
+
+ return 0;
+}
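For readability, the S()/U() macros above expand as follows (shown for the iport check; this is just the preprocessor output, not new logic):

/* U(F_PORT, iport) expands to:
 *   (!(fconf & (F_PORT)) && (fs->val.iport || fs->mask.iport))
 * i.e. the filter matches on the ingress port, but TP_VLAN_PRI_MAP
 * was not configured to include the Port compressed filter field.
 */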
+
+/**
+ * Get the queue to which the traffic must be steered to.
+ */
+static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
+ struct ch_filter_specification *fs)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int iq;
+
+ /*
+ * If the user has requested steering matching Ingress Packets
+ * to a specific Queue Set, we need to make sure it's in range
+ * for the port and map that into the Absolute Queue ID of the
+ * Queue Set's Response Queue.
+ */
+ if (!fs->dirsteer) {
+ iq = 0;
+ } else {
+ /*
+ * If the iq id is greater than the number of qsets,
+ * then assume it is an absolute qid.
+ */
+ if (fs->iq < pi->n_rx_qsets)
+ iq = adapter->sge.ethrxq[pi->first_qset +
+ fs->iq].rspq.abs_id;
+ else
+ iq = fs->iq;
+ }
+
+ return iq;
+}
+
+/* Return an error number if the indicated filter isn't writable ... */
+int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * Send CPL_SET_TCB_FIELD message
+ */
+static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
+ u16 word, u64 mask, u64 val, int no_reply)
+{
+ struct rte_mbuf *mbuf;
+ struct cpl_set_tcb_field *req;
+ struct sge_ctrl_txq *ctrlq;
+
+ ctrlq = &adapter->sge.ctrlq[0];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ WARN_ON(!mbuf);
+
+ mbuf->data_len = sizeof(*req);
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
+ memset(req, 0, sizeof(*req));
+ INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
+ req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
+ V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
+ V_NO_REPLY(no_reply));
+ req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+
+ t4_mgmt_tx(ctrlq, mbuf);
+}
+
+/**
+ * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
+ */
+static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
+ struct cpl_set_tcb_field *req,
+ unsigned int word,
+ u64 mask, u64 val, u8 cookie,
+ int no_reply)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
+ req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
+ V_QUEUENO(0));
+ req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+ sc = (struct ulptx_idata *)(req + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Check if entry already filled.
+ */
+bool is_filter_set(struct tid_info *t, int fidx, int family)
+{
+ bool result = FALSE;
+ int i, max;
+
+ /* IPv6 requires four slots and IPv4 requires only 1 slot.
+ * Ensure there are enough slots available.
+ */
+ max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
+
+ t4_os_lock(&t->ftid_lock);
+ for (i = fidx; i <= max; i++) {
+ if (rte_bitmap_get(t->ftid_bmap, i)) {
+ result = TRUE;
+ break;
+ }
+ }
+ t4_os_unlock(&t->ftid_lock);
+ return result;
+}
+
+/**
+ * Allocate an available free entry
+ */
+int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
+{
+ struct tid_info *t = &adap->tids;
+ int pos;
+ int size = t->nftids;
+
+ t4_os_lock(&t->ftid_lock);
+ if (family == FILTER_TYPE_IPV6)
+ pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
+ else
+ pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
+ t4_os_unlock(&t->ftid_lock);
+
+ return pos < size ? pos : -1;
+}
+
+/**
+ * Construct hash filter ntuple.
+ */
+static u64 hash_filter_ntuple(const struct filter_entry *f)
+{
+ struct adapter *adap = ethdev2adap(f->dev);
+ struct tp_params *tp = &adap->params.tp;
+ u64 ntuple = 0;
+ u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
+
+ if (tp->port_shift >= 0)
+ ntuple |= (u64)f->fs.mask.iport << tp->port_shift;
+
+ if (tp->protocol_shift >= 0) {
+ if (!f->fs.val.proto)
+ ntuple |= (u64)tcp_proto << tp->protocol_shift;
+ else
+ ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
+ }
+
+ if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
+ ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
+
+ if (ntuple != tp->hash_filter_mask)
+ return 0;
+
+ return ntuple;
+}
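A small worked example of the ntuple packing (the shift values here are hypothetical; the real ones come from t4_filter_field_shift() based on TP_VLAN_PRI_MAP):

/* Suppose port_shift = 0 (3-bit field), protocol_shift = 3 (8-bit field)
 * and ethertype_shift = 11 (16-bit field). For iport = 1, proto = TCP (6)
 * and ethtype = 0x0800:
 *   ntuple = (1 << 0) | (6 << 3) | (0x0800 << 11) = 0x400031
 */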
+
+/**
+ * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
+ */
+static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
+ unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*abort_req) -
+ sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+ abort_req->rsvd0 = cpu_to_be32(0);
+ abort_req->rsvd1 = 0;
+ abort_req->cmd = CPL_ABORT_NO_RST;
+ sc = (struct ulptx_idata *)(abort_req + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
+ */
+static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
+ unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*abort_rpl) -
+ sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ abort_rpl->rsvd0 = cpu_to_be32(0);
+ abort_rpl->rsvd1 = 0;
+ abort_rpl->cmd = CPL_ABORT_NO_RST;
+ sc = (struct ulptx_idata *)(abort_rpl + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Delete the specified hash filter.
+ */
+static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
+ unsigned int filter_id,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct tid_info *t = &adapter->tids;
+ struct filter_entry *f;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+ int ret;
+
+ if (filter_id > adapter->tids.ntids)
+ return -E2BIG;
+
+ f = lookup_tid(t, filter_id);
+ if (!f) {
+ dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
+ __func__, filter_id);
+ return -EINVAL;
+ }
+
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ unsigned int wrlen;
+ struct rte_mbuf *mbuf;
+ struct work_request_hdr *wr;
+ struct ulptx_idata *aligner;
+ struct cpl_set_tcb_field *req;
+ struct cpl_abort_req *abort_req;
+ struct cpl_abort_rpl *abort_rpl;
+
+ f->ctx = ctx;
+ f->pending = 1;
+
+ wrlen = cxgbe_roundup(sizeof(*wr) +
+ (sizeof(*req) + sizeof(*aligner)) +
+ sizeof(*abort_req) + sizeof(*abort_rpl),
+ 16);
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ dev_err(adapter, "%s: could not allocate skb ..\n",
+ __func__);
+ goto out_err;
+ }
+
+ mbuf->data_len = wrlen;
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
+ INIT_ULPTX_WR(req, wrlen, 0, 0);
+ wr = (struct work_request_hdr *)req;
+ wr++;
+ req = (struct cpl_set_tcb_field *)wr;
+ mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
+ V_TCB_RSS_INFO(M_TCB_RSS_INFO),
+ V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
+ 0, 1);
+ aligner = (struct ulptx_idata *)(req + 1);
+ abort_req = (struct cpl_abort_req *)(aligner + 1);
+ mk_abort_req_ulp(abort_req, f->tid);
+ abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
+ mk_abort_rpl_ulp(abort_rpl, f->tid);
+ t4_mgmt_tx(ctrlq, mbuf);
+ }
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
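Roughly, the composite work request built above lays out as follows (each CPL is wrapped by its mk_*_ulp() helper in a ULP_TX_PKT plus an immediate-data sub-command, and wrlen rounds the total up to 16 bytes):

/* [ work_request_hdr ]                     ULPTX WR header
 * [ ULP_TX_PKT + IMM + cpl_set_tcb_field ] point TCB RSS_INFO at fw_evtq
 * [ ulptx_idata NOOP ("aligner") ]         pads the next CPL to 16 bytes
 * [ ULP_TX_PKT + IMM + cpl_abort_req ]     tear down the tid
 * [ ULP_TX_PKT + IMM + cpl_abort_rpl ]     complete the abort locally
 */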
+
+/**
+ * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
+ */
+static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req6 *req = NULL;
+ u64 local_lo, local_hi, peer_lo, peer_hi;
+ u32 *lip = (u32 *)f->fs.val.lip;
+ u32 *fip = (u32 *)f->fs.val.fip;
+
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T6:
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
+
+ INIT_TP_WR(req, 0);
+ break;
+ default:
+ dev_err(adap, "%s: unsupported chip type!\n", __func__);
+ return;
+ }
+
+ local_hi = ((u64)lip[1]) << 32 | lip[0];
+ local_lo = ((u64)lip[3]) << 32 | lip[2];
+ peer_hi = ((u64)fip[1]) << 32 | fip[0];
+ peer_lo = ((u64)fip[3]) << 32 | fip[2];
+
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip_hi = local_hi;
+ req->local_ip_lo = local_lo;
+ req->peer_ip_hi = peer_hi;
+ req->peer_ip_lo = peer_lo;
+ req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+ V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+ << 1) |
+ V_TX_CHAN(f->fs.eport) |
+ V_ULP_MODE(ULP_MODE_NONE) |
+ F_TCAM_BYPASS | F_NON_OFFLOAD);
+ req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+ req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+ V_RSS_QUEUE(f->fs.iq) |
+ F_T5_OPT_2_VALID |
+ F_RX_CHANNEL |
+ V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
+}
+
+/**
+ * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
+ */
+static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req *req = NULL;
+
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T6:
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
+
+ INIT_TP_WR(req, 0);
+ break;
+ default:
+ dev_err(adap, "%s: unsupported chip type!\n", __func__);
+ return;
+ }
+
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
+ f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
+ req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
+ f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+ req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+ V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+ << 1) |
+ V_TX_CHAN(f->fs.eport) |
+ V_ULP_MODE(ULP_MODE_NONE) |
+ F_TCAM_BYPASS | F_NON_OFFLOAD);
+ req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+ req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+ V_RSS_QUEUE(f->fs.iq) |
+ F_T5_OPT_2_VALID |
+ F_RX_CHANNEL |
+ V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
+}
+
+/**
+ * Set the specified hash filter.
+ */
+static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ struct tid_info *t = &adapter->tids;
+ struct filter_entry *f;
+ struct rte_mbuf *mbuf;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int iq;
+ int atid, size;
+ int ret = 0;
+
+ ret = validate_filter(adapter, fs);
+ if (ret)
+ return ret;
+
+ iq = get_filter_steerq(dev, fs);
+
+ ctrlq = &adapter->sge.ctrlq[pi->port_id];
+
+ f = t4_os_alloc(sizeof(*f));
+ if (!f) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ f->fs = *fs;
+ f->ctx = ctx;
+ f->dev = dev;
+ f->fs.iq = iq;
+
+ atid = cxgbe_alloc_atid(t, f);
+ if (atid < 0) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ if (f->fs.type) {
+ /* IPv6 hash filter */
+ f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
+ if (!f->clipt)
+ goto free_atid;
+
+ size = sizeof(struct cpl_t6_act_open_req6);
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto free_clip;
+ }
+
+ mbuf->data_len = size;
+ mbuf->pkt_len = mbuf->data_len;
+
+ mk_act_open_req6(f, mbuf,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ } else {
+ /* IPv4 hash filter */
+ size = sizeof(struct cpl_t6_act_open_req);
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto free_atid;
+ }
+
+ mbuf->data_len = size;
+ mbuf->pkt_len = mbuf->data_len;
+
+ mk_act_open_req(f, mbuf,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ }
+
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+
+free_clip:
+ cxgbe_clip_release(f->dev, f->clipt);
+free_atid:
+ cxgbe_free_atid(t, atid);
+
+out_err:
+ t4_os_free(f);
+ return ret;
+}
+
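cxgbe_set_hash_filter() builds qid_filterid by packing the firmware event queue's absolute ID above a 14-bit active-open TID, so one 32-bit value tells the firmware both which reply queue to use and which atid the reply belongs to. A hypothetical helper naming that encoding (not part of this patch; the 14-bit width matches the shift used above):

    static inline unsigned int mk_qid_filterid(unsigned int evtq_abs_id,
                                               unsigned int atid)
    {
        return (evtq_abs_id << 14) | atid;
    }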
+/**
+ * Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+void clear_filter(struct filter_entry *f)
+{
+ if (f->clipt)
+ cxgbe_clip_release(f->dev, f->clipt);
+
+ /*
+ * The zeroing of the filter rule below clears the filter's valid,
+ * pending, and locked flags, etc., so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+/**
+ * t4_mk_filtdelwr - create a delete filter WR
+ * @ftid: the filter ID
+ * @wr: the filter work request to populate
+ * @qid: ingress queue to receive the delete notification
+ *
+ * Creates a filter work request to delete the supplied filter. If @qid is
+ * negative the delete notification is suppressed.
+ */
+static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+ memset(wr, 0, sizeof(*wr));
+ wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
+ wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_NOREPLY(qid < 0));
+ wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
+ if (qid >= 0)
+ wr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
+/**
+ * Create FW work request to delete the filter at a specified index
+ */
+static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct rte_mbuf *mbuf;
+ struct fw_filter_wr *fwr;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf)
+ return -ENOMEM;
+
+ mbuf->data_len = sizeof(*fwr);
+ mbuf->pkt_len = mbuf->data_len;
+
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
+ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /*
+ * Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+}
+
+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct rte_mbuf *mbuf;
+ struct fw_filter_wr *fwr;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+ int ret;
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mbuf->data_len = sizeof(*fwr);
+ mbuf->pkt_len = mbuf->data_len;
+
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
+ memset(fwr, 0, sizeof(*fwr));
+
+ /*
+ * Construct the work request to set the filter.
+ */
+ fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
+ fwr->tid_to_iq =
+ cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
+ V_FW_FILTER_WR_RQTYPE(f->fs.type) |
+ V_FW_FILTER_WR_NOREPLY(0) |
+ V_FW_FILTER_WR_IQ(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
+ V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+ V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
+ V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+ V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
+ V_FW_FILTER_WR_PRIO(f->fs.prio));
+ fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
+ fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
+ V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
+ ));
+ fwr->maci_to_matchtypem =
+ cpu_to_be32(V_FW_FILTER_WR_PORT(f->fs.val.iport) |
+ V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = cpu_to_be16(f->fs.val.lport);
+ fwr->lpm = cpu_to_be16(f->fs.mask.lport);
+ fwr->fp = cpu_to_be16(f->fs.val.fport);
+ fwr->fpm = cpu_to_be16(f->fs.mask.fport);
+
+ /*
+ * Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+
+out:
+ return ret;
+}
+
+/**
+ * Set the corresponding entry in the bitmap. 4 slots are
+ * marked for IPv6, whereas only 1 slot is marked for IPv4.
+ */
+static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
+{
+ t4_os_lock(&t->ftid_lock);
+ if (rte_bitmap_get(t->ftid_bmap, fidx)) {
+ t4_os_unlock(&t->ftid_lock);
+ return -EBUSY;
+ }
+
+ if (family == FILTER_TYPE_IPV4) {
+ rte_bitmap_set(t->ftid_bmap, fidx);
+ } else {
+ rte_bitmap_set(t->ftid_bmap, fidx);
+ rte_bitmap_set(t->ftid_bmap, fidx + 1);
+ rte_bitmap_set(t->ftid_bmap, fidx + 2);
+ rte_bitmap_set(t->ftid_bmap, fidx + 3);
+ }
+ t4_os_unlock(&t->ftid_lock);
+ return 0;
+}
+
+/**
+ * Clear the corresponding entry in the bitmap. 4 slots are
+ * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
+ */
+static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
+{
+ t4_os_lock(&t->ftid_lock);
+ if (family == FILTER_TYPE_IPV4) {
+ rte_bitmap_clear(t->ftid_bmap, fidx);
+ } else {
+ rte_bitmap_clear(t->ftid_bmap, fidx);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 1);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 2);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 3);
+ }
+ t4_os_unlock(&t->ftid_lock);
+}
+
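These two helpers differ only in how many consecutive bits they touch. A plain-C sketch of the same slot accounting, using a flat bit array in place of rte_bitmap:

    #include <stdint.h>

    /* Mark n consecutive slots starting at fidx: n = 1 for IPv4,
     * n = 4 for IPv6, mirroring cxgbe_set_ftid() above. */
    static void mark_slots(uint64_t *bmap, unsigned int fidx, unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++)
            bmap[(fidx + i) / 64] |= 1ULL << ((fidx + i) % 64);
    }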
+/**
+ * Check a delete filter request for validity and send it to the hardware.
+ * Return 0 on success, an error number otherwise. We attach any provided
+ * filter operation context to the internal filter specification in order to
+ * facilitate signaling completion of the operation.
+ */
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct filter_entry *f;
+ unsigned int chip_ver;
+ int ret;
+
+ if (is_hashfilter(adapter) && fs->cap)
+ return cxgbe_del_hash_filter(dev, filter_id, ctx);
+
+ if (filter_id >= adapter->tids.nftids)
+ return -ERANGE;
+
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ ret = is_filter_set(&adapter->tids, filter_id, fs->type);
+ if (!ret) {
+ dev_warn(adap, "%s: could not find filter entry: %u\n",
+ __func__, filter_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure the filter ID is aligned on a 2-slot boundary for T6,
+ * and on a 4-slot boundary for cards below T6.
+ */
+ if (fs->type) {
+ if (chip_ver < CHELSIO_T6)
+ filter_id &= ~(0x3);
+ else
+ filter_id &= ~(0x1);
+ }
+
+ f = &adapter->tids.ftid_tab[filter_id];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ f->ctx = ctx;
+ cxgbe_clear_ftid(&adapter->tids,
+ f->tid - adapter->tids.ftid_base,
+ f->fs.type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ return del_filter_wr(dev, filter_id);
+ }
+
+ /*
+ * If the caller has passed in a Completion Context then we need to
+ * mark it as a successful completion so they don't stall waiting
+ * for it.
+ */
+ if (ctx) {
+ ctx->result = 0;
+ t4_complete(&ctx->completion);
+ }
+
+ return 0;
+}
+
+/**
+ * Check a Chelsio Filter Request for validity, convert it into our internal
+ * format and send it to the hardware. Return 0 on success, an error number
+ * otherwise. We attach any provided filter operation context to the internal
+ * filter specification in order to facilitate signaling completion of the
+ * operation.
+ */
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int fidx, iq, fid_bit = 0;
+ struct filter_entry *f;
+ unsigned int chip_ver;
+ uint8_t bitoff[16] = {0};
+ int ret;
+
+ if (is_hashfilter(adapter) && fs->cap)
+ return cxgbe_set_hash_filter(dev, fs, ctx);
+
+ if (filter_id >= adapter->tids.nftids)
+ return -ERANGE;
+
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ ret = validate_filter(adapter, fs);
+ if (ret)
+ return ret;
+
+ /*
+ * Ensure the filter ID is aligned on a 4-slot boundary for IPv6
+ * maskfull filters.
+ */
+ if (fs->type)
+ filter_id &= ~(0x3);
+
+ ret = is_filter_set(&adapter->tids, filter_id, fs->type);
+ if (ret)
+ return -EBUSY;
+
+ iq = get_filter_steerq(dev, fs);
+
+ /*
+ * IPv6 filters occupy four slots and must be aligned on four-slot
+ * boundaries for T5. On T6, IPv6 filters occupy two-slots and
+ * must be aligned on two-slot boundaries.
+ *
+ * IPv4 filters only occupy a single slot and have no alignment
+ * requirements but writing a new IPv4 filter into the middle
+ * of an existing IPv6 filter requires clearing the old IPv6
+ * filter.
+ */
+ if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
+ /*
+ * For T6, if our IPv4 filter isn't being written at an even
+ * filter index and there's an IPv6 filter at the even base
+ * slot, then we need to delete that IPv6 filter first ...
+ * For adapters below T6, an IPv6 filter occupies 4 entries.
+ */
+ if (chip_ver < CHELSIO_T6)
+ fidx = filter_id & ~0x3;
+ else
+ fidx = filter_id & ~0x1;
+
+ if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid)
+ return -EBUSY;
+ }
+ } else { /* IPv6 */
+ unsigned int max_filter_id;
+
+ if (chip_ver < CHELSIO_T6) {
+ /*
+ * Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3)
+ return -EINVAL;
+
+ max_filter_id = filter_id + 4;
+ } else {
+ /*
+ * For T6, with CLIP enabled, an IPv6 filter occupies
+ * 2 entries.
+ */
+ if (filter_id & 0x1)
+ return -EINVAL;
+
+ max_filter_id = filter_id + 2;
+ }
+
+ /*
+ * Check all except the base overlapping IPv4 filter
+ * slots.
+ */
+ for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid)
+ return -EBUSY;
+ }
+ }
+
+ /*
+ * Check to make sure that provided filter index is not
+ * already in use by someone else
+ */
+ f = &adapter->tids.ftid_tab[filter_id];
+ if (f->valid)
+ return -EBUSY;
+
+ fidx = adapter->tids.ftid_base + filter_id;
+ fid_bit = filter_id;
+ ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
+ if (ret)
+ return ret;
+
+ /*
+ * Check to make sure the filter requested is writable ...
+ */
+ ret = writable_filter(f);
+ if (ret) {
+ /* Clear the bits we have set above */
+ cxgbe_clear_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ return ret;
+ }
+
+ /*
+ * Allocate a clip table entry only if we have non-zero IPv6 address
+ */
+ if (chip_ver > CHELSIO_T5 && fs->type &&
+ memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
+ f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
+ if (!f->clipt)
+ goto free_tid;
+ }
+
+ /*
+ * Convert the filter specification into our internal format.
+ * We copy the PF/VF specification into the Outer VLAN field
+ * here so the rest of the code -- including the interface to
+ * the firmware -- doesn't have to constantly do these checks.
+ */
+ f->fs = *fs;
+ f->fs.iq = iq;
+ f->dev = dev;
+
+ /*
+ * Attempt to set the filter. If we don't succeed, we clear
+ * it and return the failure.
+ */
+ f->ctx = ctx;
+ f->tid = fidx; /* Save the actual tid */
+ ret = set_filter_wr(dev, filter_id);
+ if (ret) {
+ fid_bit = f->tid - adapter->tids.ftid_base;
+ goto free_tid;
+ }
+
+ return ret;
+
+free_tid:
+ cxgbe_clear_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ clear_filter(f);
+ return ret;
+}
+
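The slot-alignment rules enforced by cxgbe_set_filter() can be condensed into one helper. This is only a sketch; chip_ver follows CHELSIO_CHIP_VERSION() (5 for T5, 6 for T6):

    static unsigned int ftid_align(unsigned int filter_id, int ipv6,
                                   unsigned int chip_ver)
    {
        if (!ipv6)
            return filter_id;         /* IPv4: one slot, no alignment */
        if (chip_ver < 6)
            return filter_id & ~0x3u; /* T5: IPv6 spans 4 slots */
        return filter_id & ~0x1u;     /* T6 with CLIP: IPv6 spans 2 slots */
    }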
+/**
+ * Handle a Hash filter write reply.
+ */
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
+{
+ struct tid_info *t = &adap->tids;
+ struct filter_entry *f;
+ struct filter_ctx *ctx = NULL;
+ unsigned int tid = GET_TID(rpl);
+ unsigned int ftid = G_TID_TID(G_AOPEN_ATID
+ (be32_to_cpu(rpl->atid_status)));
+ unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+
+ f = lookup_atid(t, ftid);
+ if (!f) {
+ dev_warn(adap, "%s: could not find filter entry: %d\n",
+ __func__, ftid);
+ return;
+ }
+
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ switch (status) {
+ case CPL_ERR_NONE: {
+ f->tid = tid;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+
+ cxgbe_insert_tid(t, f, f->tid, 0);
+ cxgbe_free_atid(t, ftid);
+ if (ctx) {
+ ctx->tid = f->tid;
+ ctx->result = 0;
+ }
+ if (f->fs.hitcnts)
+ set_tcb_field(adap, tid,
+ W_TCB_TIMESTAMP,
+ V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
+ V_TCB_T_RTT_TS_RECENT_AGE
+ (M_TCB_T_RTT_TS_RECENT_AGE),
+ V_TCB_TIMESTAMP(0ULL) |
+ V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
+ 1);
+ break;
+ }
+ default:
+ dev_warn(adap, "%s: filter creation failed with status = %u\n",
+ __func__, status);
+
+ if (ctx) {
+ if (status == CPL_ERR_TCAM_FULL)
+ ctx->result = -EAGAIN;
+ else
+ ctx->result = -EINVAL;
+ }
+
+ cxgbe_free_atid(t, ftid);
+ t4_os_free(f);
+ }
+
+ if (ctx)
+ t4_complete(&ctx->completion);
+}
+
+/**
+ * Handle a LE-TCAM filter write/deletion reply.
+ */
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ struct filter_entry *f = NULL;
+ unsigned int tid = GET_TID(rpl);
+ int idx, max_fidx = adap->tids.nftids;
+
+ /* Get the corresponding filter entry for this tid */
+ if (adap->tids.ftid_tab) {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+ if (idx < 0 || idx >= max_fidx)
+ return;
+
+ f = &adap->tids.ftid_tab[idx];
+ if (f->tid != tid)
+ return;
+ }
+
+ /* We found the filter entry for this tid */
+ if (f) {
+ unsigned int ret = G_COOKIE(rpl->cookie);
+ struct filter_ctx *ctx;
+
+ /*
+ * Pull off any filter operation context attached to the
+ * filter.
+ */
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->tid = f->tid;
+ ctx->result = 0;
+ }
+ } else if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /*
+ * Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(f);
+ if (ctx)
+ ctx->result = 0;
+ } else {
+ /*
+ * Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_warn(adap, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(f);
+ if (ctx)
+ ctx->result = -EINVAL;
+ }
+
+ if (ctx)
+ t4_complete(&ctx->completion);
+ }
+}
+
+/*
+ * Retrieve the packet count for the specified filter.
+ */
+int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
+ u64 *c, int hash, bool get_byte)
+{
+ struct filter_entry *f;
+ unsigned int tcb_base, tcbaddr;
+ int ret;
+
+ tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
+ if (is_hashfilter(adapter) && hash) {
+ if (fidx < adapter->tids.ntids) {
+ f = adapter->tids.tid_tab[fidx];
+ if (!f)
+ return -EINVAL;
+
+ if (is_t5(adapter->params.chip)) {
+ *c = 0;
+ return 0;
+ }
+ tcbaddr = tcb_base + (fidx * TCB_SIZE);
+ goto get_count;
+ } else {
+ return -ERANGE;
+ }
+ } else {
+ if (fidx >= adapter->tids.nftids)
+ return -ERANGE;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ if (!f->valid)
+ return -EINVAL;
+
+ tcbaddr = tcb_base + f->tid * TCB_SIZE;
+ }
+
+ f = &adapter->tids.ftid_tab[fidx];
+ if (!f->valid)
+ return -EINVAL;
+
+get_count:
+ if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
+ /*
+ * For T5 and T6, the Filter Packet Hit Count is maintained as a
+ * 32-bit Big Endian value in the TCB field {timestamp}. Oddly,
+ * instead of the hit count showing up at offset 20
+ * ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually shows up at
+ * offset 24. Whacky.
+ */
+ if (get_byte) {
+ unsigned int word_offset = 4;
+ __be64 be64_byte_count;
+
+ t4_os_lock(&adapter->win0_lock);
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr +
+ (word_offset * sizeof(__be32)),
+ sizeof(be64_byte_count),
+ &be64_byte_count,
+ T4_MEMORY_READ);
+ t4_os_unlock(&adapter->win0_lock);
+ if (ret < 0)
+ return ret;
+ *c = be64_to_cpu(be64_byte_count);
+ } else {
+ unsigned int word_offset = 6;
+ __be32 be32_count;
+
+ t4_os_lock(&adapter->win0_lock);
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr +
+ (word_offset * sizeof(__be32)),
+ sizeof(be32_count), &be32_count,
+ T4_MEMORY_READ);
+ t4_os_unlock(&adapter->win0_lock);
+ if (ret < 0)
+ return ret;
+ *c = (u64)be32_to_cpu(be32_count);
+ }
+ }
+ return 0;
+}
+
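To make the word offsets in cxgbe_get_filter_count() concrete: within each TCB_SIZE-byte TCB, the 64-bit byte count is read from 32-bit word 4 (byte offset 16) and the 32-bit hit count from word 6 (byte offset 24). A fragment reusing the names above:

    uint32_t tcbaddr = tcb_base + tid * TCB_SIZE;
    uint32_t byte_cnt_addr = tcbaddr + 4 * sizeof(uint32_t); /* offset 16 */
    uint32_t hit_cnt_addr  = tcbaddr + 6 * sizeof(uint32_t); /* offset 24 */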
+/**
+ * Handle a Hash filter delete reply.
+ */
+void hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl)
+{
+ struct tid_info *t = &adap->tids;
+ struct filter_entry *f;
+ struct filter_ctx *ctx = NULL;
+ unsigned int tid = GET_TID(rpl);
+
+ f = lookup_tid(t, tid);
+ if (!f) {
+ dev_warn(adap, "%s: could not find filter entry: %u\n",
+ __func__, tid);
+ return;
+ }
+
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ f->valid = 0;
+
+ if (f->clipt)
+ cxgbe_clip_release(f->dev, f->clipt);
+
+ cxgbe_remove_tid(t, 0, tid, 0);
+ t4_os_free(f);
+
+ if (ctx) {
+ ctx->result = 0;
+ t4_complete(&ctx->completion);
+ }
+}
diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h
new file mode 100644
index 00000000..af8fa752
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_filter.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_FILTER_H_
+#define _CXGBE_FILTER_H_
+
+#include "t4_msg.h"
+/*
+ * Defined bit width of user definable filter tuples
+ */
+#define ETHTYPE_BITWIDTH 16
+#define FRAG_BITWIDTH 1
+#define MACIDX_BITWIDTH 9
+#define FCOE_BITWIDTH 1
+#define IPORT_BITWIDTH 3
+#define MATCHTYPE_BITWIDTH 3
+#define PROTO_BITWIDTH 8
+#define TOS_BITWIDTH 8
+#define PF_BITWIDTH 8
+#define VF_BITWIDTH 8
+#define IVLAN_BITWIDTH 16
+#define OVLAN_BITWIDTH 16
+
+/*
+ * Filter matching rules. These consist of a set of ingress packet field
+ * (value, mask) tuples. The associated ingress packet field matches the
+ * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
+ * rule can be constructed by specifying a tuple of (0, 0).) A filter rule
+ * matches an ingress packet when all of the individual individual field
+ * matching rules are true.
+ *
+ * Partial field masks are always valid, however, while it may be easy to
+ * understand their meanings for some fields (e.g. IP address to match a
+ * subnet), for others making sensible partial masks is less intuitive (e.g.
+ * MPS match type) ...
+ */
+struct ch_filter_tuple {
+ /*
+ * Compressed header matching field rules. The TP_VLAN_PRI_MAP
+ * register selects which of these fields will participate in the
+ * filter match rules -- up to a maximum of 36 bits. Because
+ * TP_VLAN_PRI_MAP is a global register, all filters must use the same
+ * set of fields.
+ */
+ uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
+ uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
+ uint32_t ivlan_vld:1; /* inner VLAN valid */
+ uint32_t ovlan_vld:1; /* outer VLAN valid */
+ uint32_t pfvf_vld:1; /* PF/VF valid */
+ uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
+ uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
+ uint32_t iport:IPORT_BITWIDTH; /* ingress port */
+ uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
+ uint32_t proto:PROTO_BITWIDTH; /* protocol type */
+ uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
+ uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
+ uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
+ uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
+ uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
+
+ /*
+ * Uncompressed header matching field rules. These are always
+ * available for field rules.
+ */
+ uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
+ uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
+ uint16_t lport; /* local port */
+ uint16_t fport; /* foreign port */
+
+ /* reservations for future additions */
+ uint8_t rsvd[12];
+};
+
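The (value, mask) semantics described above reduce to a single expression per field; a minimal illustration:

    #include <stdbool.h>
    #include <stdint.h>

    /* A field matches its rule when (field & mask) == value;
     * (0, 0) is the wildcard that matches any field. */
    static bool tuple_matches(uint32_t field, uint32_t value, uint32_t mask)
    {
        return (field & mask) == value;
    }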
+/*
+ * Filter specification
+ */
+struct ch_filter_specification {
+ /* Administrative fields for filter. */
+ uint32_t hitcnts:1; /* count filter hits in TCB */
+ uint32_t prio:1; /* filter has priority over active/server */
+
+ /*
+ * Fundamental filter typing. This is the one element of filter
+ * matching that doesn't exist as a (value, mask) tuple.
+ */
+ uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
+ uint32_t cap:1; /* 0 => LE-TCAM, 1 => Hash */
+
+ /*
+ * Packet dispatch information. Ingress packets which match the
+ * filter rules will be dropped, passed to the host or switched back
+ * out as egress packets.
+ */
+ uint32_t action:2; /* drop, pass, switch */
+
+ uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
+ uint32_t iq:10; /* ingress queue */
+
+ uint32_t eport:2; /* egress port to switch packet out */
+
+ /* Filter rule value/mask pairs. */
+ struct ch_filter_tuple val;
+ struct ch_filter_tuple mask;
+};
+
+enum {
+ FILTER_PASS = 0, /* default */
+ FILTER_DROP,
+ FILTER_SWITCH
+};
+
+enum filter_type {
+ FILTER_TYPE_IPV4 = 0,
+ FILTER_TYPE_IPV6,
+};
+
+struct t4_completion {
+ unsigned int done; /* completion done (0 - No, 1 - Yes) */
+ rte_spinlock_t lock; /* completion lock */
+};
+
+/*
+ * Filter operation context to allow callers to wait for
+ * an asynchronous completion.
+ */
+struct filter_ctx {
+ struct t4_completion completion; /* completion rendezvous */
+ int result; /* result of operation */
+ u32 tid; /* to store tid of hash filter */
+};
+
+/*
+ * Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware or the
+ * firmware command.
+ */
+struct filter_entry {
+ /*
+ * Administrative fields for filter.
+ */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+ u32 pending:1; /* filter action is pending FW reply */
+ struct filter_ctx *ctx; /* caller's completion hook */
+ struct clip_entry *clipt; /* CLIP Table entry for IPv6 */
+ struct rte_eth_dev *dev; /* Port's rte eth device */
+ void *private; /* For use by apps using filter_entry */
+
+ /* This will store the actual tid */
+ u32 tid;
+
+ /*
+ * The filter itself.
+ */
+ struct ch_filter_specification fs;
+};
+
+#define FILTER_ID_MAX (~0U)
+
+struct tid_info;
+struct adapter;
+
+/**
+ * Find first clear bit in the bitmap.
+ */
+static inline unsigned int cxgbe_find_first_zero_bit(struct rte_bitmap *bmap,
+ unsigned int size)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < size; idx++)
+ if (!rte_bitmap_get(bmap, idx))
+ break;
+
+ return idx;
+}
+
+/**
+ * Find a free region of 'num' consecutive entries.
+ */
+static inline unsigned int
+cxgbe_bitmap_find_free_region(struct rte_bitmap *bmap, unsigned int size,
+ unsigned int num)
+{
+ unsigned int idx, j, free = 0;
+
+ if (num > size)
+ return size;
+
+ for (idx = 0; idx < size; idx += num) {
+ for (j = 0; j < num; j++) {
+ if (!rte_bitmap_get(bmap, idx + j)) {
+ free++;
+ } else {
+ free = 0;
+ break;
+ }
+ }
+
+ /* Found the Region */
+ if (free == num)
+ break;
+
+ /* Reached the end and still no region found */
+ if ((idx + num) > size) {
+ idx = size;
+ break;
+ }
+ }
+
+ return idx;
+}
+
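Note that the helper returns size when no region exists, so callers must compare against the bitmap size rather than testing for a negative value. A hypothetical caller reserving four consecutive slots for an IPv6 filter:

    unsigned int pos;

    pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, t->nftids, 4);
    if (pos == t->nftids)
        return -ENOMEM; /* no run of 4 consecutive free entries */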
+bool is_filter_set(struct tid_info *, int fidx, int family);
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
+void clear_filter(struct filter_entry *f);
+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx);
+int writable_filter(struct filter_entry *f);
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family);
+int init_hash_filter(struct adapter *adap);
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl);
+void hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl);
+int validate_filter(struct adapter *adap, struct ch_filter_specification *fs);
+int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
+ u64 *c, int hash, bool get_byte);
+#endif /* _CXGBE_FILTER_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
new file mode 100644
index 00000000..01c945f1
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -0,0 +1,845 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#include "common.h"
+#include "cxgbe_flow.h"
+
+#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
+do { \
+ if (!((fs)->val.elem || (fs)->mask.elem)) { \
+ (fs)->val.elem = (__v); \
+ (fs)->mask.elem = (__m); \
+ } else { \
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
+ NULL, "a filter can be specified" \
+ " only once"); \
+ } \
+} while (0)
+
+#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
+do { \
+ memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
+ memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
+} while (0)
+
+#define CXGBE_FILL_FS(v, m, elem) \
+ __CXGBE_FILL_FS(v, m, fs, elem, e)
+
+#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
+ __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
+
+static int
+cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
+{
+ /* rte_flow specification does not allow it. */
+ if (!i->spec && (i->mask || i->last))
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "last or mask given without spec");
+ /*
+ * We don't support it.
+ * Although we could support values in last as 0's or last == spec,
+ * this would not provide the user with any additional functionality
+ * and would only increase the complexity for us.
+ */
+ if (i->last)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "last is not supported by chelsio pmd");
+ return 0;
+}
+
+static void
+cxgbe_fill_filter_region(struct adapter *adap,
+ struct ch_filter_specification *fs)
+{
+ struct tp_params *tp = &adap->params.tp;
+ u64 hash_filter_mask = tp->hash_filter_mask;
+ u64 ntuple_mask = 0;
+
+ fs->cap = 0;
+
+ if (!is_hashfilter(adap))
+ return;
+
+ if (fs->type) {
+ uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff};
+ uint8_t bitoff[16] = {0};
+
+ if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, biton, sizeof(biton)))
+ return;
+ } else {
+ uint32_t biton = 0xffffffff;
+ uint32_t bitoff = 0x0U;
+
+ if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, &biton, sizeof(biton)))
+ return;
+ }
+
+ if (!fs->val.lport || fs->mask.lport != 0xffff)
+ return;
+ if (!fs->val.fport || fs->mask.fport != 0xffff)
+ return;
+
+ if (tp->protocol_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
+ if (tp->ethertype_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+ if (tp->port_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+
+ if (ntuple_mask != hash_filter_mask)
+ return;
+
+ fs->cap = 1; /* use hash region */
+}
+
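A worked example of the ntuple-mask comparison above, with assumed shift values (the real shifts are derived from TP_VLAN_PRI_MAP and vary per configuration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Assumed example shifts; real values come from the adapter. */
        int protocol_shift = 0, ethertype_shift = 8, port_shift = 24;
        uint64_t ntuple_mask = 0;

        ntuple_mask |= (uint64_t)0xff << protocol_shift;    /* proto   */
        ntuple_mask |= (uint64_t)0xffff << ethertype_shift; /* ethtype */
        ntuple_mask |= (uint64_t)0x7 << port_shift;         /* iport   */

        /* fs->cap may be set only when this equals hash_filter_mask */
        printf("ntuple_mask = 0x%llx\n", (unsigned long long)ntuple_mask);
        return 0;
    }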
+static int
+ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_phy_port *val = item->spec;
+ const struct rte_flow_item_phy_port *umask = item->mask;
+ const struct rte_flow_item_phy_port *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
+
+ if (val->index > 0x7)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "port index up to 0x7 is supported");
+
+ CXGBE_FILL_FS(val->index, mask->index, iport);
+
+ return 0;
+}
+
+static int
+ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_udp *val = item->spec;
+ const struct rte_flow_item_udp *umask = item->mask;
+ const struct rte_flow_item_udp *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
+
+ if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "udp: only src/dst port supported");
+
+ CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
+ if (!val)
+ return 0;
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+ be16_to_cpu(mask->hdr.src_port), fport);
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+ be16_to_cpu(mask->hdr.dst_port), lport);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_tcp *val = item->spec;
+ const struct rte_flow_item_tcp *umask = item->mask;
+ const struct rte_flow_item_tcp *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
+
+ if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
+ mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
+ mask->hdr.tcp_urp)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "tcp: only src/dst port supported");
+
+ CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
+ if (!val)
+ return 0;
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+ be16_to_cpu(mask->hdr.src_port), fport);
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+ be16_to_cpu(mask->hdr.dst_port), lport);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_ipv4 *val = item->spec;
+ const struct rte_flow_item_ipv4 *umask = item->mask;
+ const struct rte_flow_item_ipv4 *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
+
+ if (mask->hdr.time_to_live || mask->hdr.type_of_service)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "ttl/tos are not supported");
+
+ fs->type = FILTER_TYPE_IPV4;
+ CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
+ if (!val)
+ return 0; /* ipv4 wild card */
+
+ CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+
+ return 0;
+}
+
+static int
+ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_ipv6 *val = item->spec;
+ const struct rte_flow_item_ipv6 *umask = item->mask;
+ const struct rte_flow_item_ipv6 *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
+
+ if (mask->hdr.vtc_flow ||
+ mask->hdr.payload_len || mask->hdr.hop_limits)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "tc/flow/hop are not supported");
+
+ fs->type = FILTER_TYPE_IPV6;
+ CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
+ if (!val)
+ return 0; /* ipv6 wild card */
+
+ CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+
+ return 0;
+}
+
+static int
+cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
+ struct rte_flow_error *e)
+{
+ if (attr->egress)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
+ attr, "attribute:<egress> is"
+ " not supported !");
+ if (attr->group > 0)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
+ attr, "group parameter is"
+ " not supported.");
+
+ flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
+
+ return 0;
+}
+
+static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+
+ if (rxq >= pi->n_rx_qsets)
+ return -EINVAL;
+ return 0;
+}
+
+static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
+{
+ struct adapter *adap = ethdev2adap(f->dev);
+ struct ch_filter_specification fs = f->fs;
+
+ if (fidx >= adap->tids.nftids) {
+ dev_err(adap, "invalid flow index %d.\n", fidx);
+ return -EINVAL;
+ }
+ if (!is_filter_set(&adap->tids, fidx, fs.type)) {
+ dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
+ struct adapter *adap, unsigned int fidx)
+{
+ if (is_filter_set(&adap->tids, fidx, fs->type)) {
+ dev_err(adap, "filter index: %d is busy.\n", fidx);
+ return -EBUSY;
+ }
+ if (fidx >= adap->tids.nftids) {
+ dev_err(adap, "filter index (%u) >= max(%u)\n",
+ fidx, adap->tids.nftids);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
+{
+ if (flow->fs.cap)
+ return 0; /* Hash filters */
+ return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
+ cxgbe_validate_fidxonadd(&flow->fs,
+ ethdev2adap(flow->dev), fidx);
+}
+
+static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ struct adapter *adap = ethdev2adap(flow->dev);
+
+ /* For tcam get the next available slot, if default value specified */
+ if (flow->fidx == FILTER_ID_MAX) {
+ int idx;
+
+ idx = cxgbe_alloc_ftid(adap, fs->type);
+ if (idx < 0) {
+ dev_err(adap, "unable to get a filter index in tcam\n");
+ return -ENOMEM;
+ }
+ *fidx = (unsigned int)idx;
+ } else {
+ *fidx = flow->fidx;
+ }
+
+ return 0;
+}
+
+static int
+ch_rte_parse_atype_switch(const struct rte_flow_action *a,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_action_phy_port *port;
+
+ switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ port = (const struct rte_flow_action_phy_port *)a->conf;
+ fs->eport = port->index;
+ break;
+ default:
+ /* We are not supposed to come here */
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "Action not supported");
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_rtef_parse_actions(struct rte_flow *flow,
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ const struct rte_flow_action_queue *q;
+ const struct rte_flow_action *a;
+ char abit = 0;
+ int ret;
+
+ for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
+ switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ if (abit++)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "specify only 1 pass/drop");
+ fs->action = FILTER_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ q = (const struct rte_flow_action_queue *)a->conf;
+ if (!q)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, q,
+ "specify rx queue index");
+ if (check_rxq(flow->dev, q->index))
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, q,
+ "Invalid rx queue");
+ if (abit++)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "specify only 1 pass/drop");
+ fs->action = FILTER_PASS;
+ fs->dirsteer = 1;
+ fs->iq = q->index;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ fs->hitcnts = 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ /* We allow multiple switch actions, but switch is
+ * not compatible with either queue or drop
+ */
+ if (abit++ && fs->action != FILTER_SWITCH)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "overlapping action specified");
+ ret = ch_rte_parse_atype_switch(a, fs, e);
+ if (ret)
+ return ret;
+ fs->action = FILTER_SWITCH;
+ break;
+ default:
+ /* Not supported action : return error */
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ a, "Action not supported");
+ }
+ }
+
+ return 0;
+}
+
+struct chrte_fparse parseitem[] = {
+ [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
+ .fptr = ch_rte_parsetype_port,
+ .dmask = &(const struct rte_flow_item_phy_port){
+ .index = 0x7,
+ }
+ },
+
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .fptr = ch_rte_parsetype_ipv4,
+ .dmask = &rte_flow_item_ipv4_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .fptr = ch_rte_parsetype_ipv6,
+ .dmask = &rte_flow_item_ipv6_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .fptr = ch_rte_parsetype_udp,
+ .dmask = &rte_flow_item_udp_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .fptr = ch_rte_parsetype_tcp,
+ .dmask = &rte_flow_item_tcp_mask,
+ },
+};
+
+static int
+cxgbe_rtef_parse_items(struct rte_flow *flow,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(flow->dev);
+ const struct rte_flow_item *i;
+ char repeat[ARRAY_SIZE(parseitem)] = {0};
+
+ for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
+ struct chrte_fparse *idx = &flow->item_parser[i->type];
+ int ret;
+
+ if (i->type >= ARRAY_SIZE(parseitem))
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "Item not supported");
+
+ switch (i->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ continue;
+ default:
+ /* check if item is repeated */
+ if (repeat[i->type])
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, i,
+ "parse items cannot be repeated (except void)");
+ repeat[i->type] = 1;
+
+ /* validate the item */
+ ret = cxgbe_validate_item(i, e);
+ if (ret)
+ return ret;
+
+ if (!idx || !idx->fptr) {
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, i,
+ "Item not supported");
+ } else {
+ ret = idx->fptr(idx->dmask, i, &flow->fs, e);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ cxgbe_fill_filter_region(adap, &flow->fs);
+
+ return 0;
+}
+
+static int
+cxgbe_flow_parse(struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ int ret;
+
+ /* parse user request into ch_filter_specification */
+ ret = cxgbe_rtef_parse_attr(flow, attr, e);
+ if (ret)
+ return ret;
+ ret = cxgbe_rtef_parse_items(flow, item, e);
+ if (ret)
+ return ret;
+ return cxgbe_rtef_parse_actions(flow, action, e);
+}
+
+static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ struct adapter *adap = ethdev2adap(dev);
+ struct tid_info *t = &adap->tids;
+ struct filter_ctx ctx;
+ unsigned int fidx;
+ int err;
+
+ if (cxgbe_get_fidx(flow, &fidx))
+ return -ENOMEM;
+ if (cxgbe_verify_fidx(flow, fidx, 0))
+ return -1;
+
+ t4_init_completion(&ctx.completion);
+ /* go create the filter */
+ err = cxgbe_set_filter(dev, fidx, fs, &ctx);
+ if (err) {
+ dev_err(adap, "Error %d while creating filter.\n", err);
+ return err;
+ }
+
+ /* Poll the FW for reply */
+ err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+ CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_CNT,
+ &ctx.completion);
+ if (err) {
+ dev_err(adap, "Filter set operation timed out (%d)\n", err);
+ return err;
+ }
+ if (ctx.result) {
+ dev_err(adap, "Hardware error %d while creating the filter.\n",
+ ctx.result);
+ return ctx.result;
+ }
+
+ if (fs->cap) { /* to destroy the filter */
+ flow->fidx = ctx.tid;
+ flow->f = lookup_tid(t, ctx.tid);
+ } else {
+ flow->fidx = fidx;
+ flow->f = &adap->tids.ftid_tab[fidx];
+ }
+
+ return 0;
+}
+
+static struct rte_flow *
+cxgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct rte_flow *flow;
+ int ret;
+
+ flow = t4_os_alloc(sizeof(struct rte_flow));
+ if (!flow) {
+ rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unable to allocate memory for"
+ " filter_entry");
+ return NULL;
+ }
+
+ flow->item_parser = parseitem;
+ flow->dev = dev;
+
+ if (cxgbe_flow_parse(flow, attr, item, action, e)) {
+ t4_os_free(flow);
+ return NULL;
+ }
+
+ /* go, interact with cxgbe_filter */
+ ret = __cxgbe_flow_create(dev, flow);
+ if (ret) {
+ rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unable to create flow rule");
+ t4_os_free(flow);
+ return NULL;
+ }
+
+ flow->f->private = flow; /* Will be used during flush */
+
+ return flow;
+}
+
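Applications reach this entry point through the generic rte_flow API. A minimal application-side sketch that drops ingress TCP traffic destined to port 80 (the rule and port number are illustrative; error details elided):

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    static int install_drop_rule(uint16_t port_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_tcp spec = {
            .hdr.dst_port = rte_cpu_to_be_16(80) };
        struct rte_flow_item_tcp mask = {
            .hdr.dst_port = rte_cpu_to_be_16(0xffff) };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_TCP,
              .spec = &spec, .mask = &mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_DROP },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
            return -1;
        return rte_flow_create(port_id, &attr, pattern, actions, &err)
               ? 0 : -1;
    }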
+static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct filter_entry *f = flow->f;
+ struct ch_filter_specification *fs;
+ struct filter_ctx ctx;
+ int err;
+
+ fs = &f->fs;
+ if (cxgbe_verify_fidx(flow, flow->fidx, 1))
+ return -1;
+
+ t4_init_completion(&ctx.completion);
+ err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
+ if (err) {
+ dev_err(adap, "Error %d while deleting filter.\n", err);
+ return err;
+ }
+
+ /* Poll the FW for reply */
+ err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+ CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_CNT,
+ &ctx.completion);
+ if (err) {
+ dev_err(adap, "Filter delete operation timed out (%d)\n", err);
+ return err;
+ }
+ if (ctx.result) {
+ dev_err(adap, "Hardware error %d while deleting the filter.\n",
+ ctx.result);
+ return ctx.result;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *e)
+{
+ int ret;
+
+ ret = __cxgbe_flow_destroy(dev, flow);
+ if (ret)
+ return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ flow, "error destroying filter.");
+ t4_os_free(flow);
+ return 0;
+}
+
+static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
+ u64 *byte_count)
+{
+ struct adapter *adap = ethdev2adap(flow->dev);
+ struct ch_filter_specification fs = flow->f->fs;
+ unsigned int fidx = flow->fidx;
+ int ret = 0;
+
+ ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
+ if (ret)
+ return ret;
+ return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
+}
+
+static int
+cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action, void *data,
+ struct rte_flow_error *e)
+{
+ struct ch_filter_specification fs;
+ struct rte_flow_query_count *c;
+ struct filter_entry *f;
+ int ret;
+
+ RTE_SET_USED(dev);
+
+ f = flow->f;
+ fs = f->fs;
+
+ if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "only count supported for query");
+
+ /*
+ * This is a valid operation since we are allowed to do Chelsio
+ * specific operations on the rte side of our code, but not
+ * vice-versa.
+ *
+ * So, fs can be queried/modified here BUT rte_flow_query_count
+ * cannot be worked on by the lower layer, since we want to keep
+ * it rte_flow agnostic.
+ */
+ if (!fs.hitcnts)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ &fs, "filter hit counters were not"
+ " enabled during filter creation");
+
+ c = (struct rte_flow_query_count *)data;
+ ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
+ if (ret)
+ return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
+ f, "cxgbe pmd failed to"
+ " perform query");
+
+ /* Query was successful */
+ c->bytes_set = 1;
+ c->hits_set = 1;
+
+ return 0; /* success / partial_success */
+}
+
+static int
+cxgbe_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct rte_flow *flow;
+ unsigned int fidx;
+ int ret;
+
+ flow = t4_os_alloc(sizeof(struct rte_flow));
+ if (!flow)
+ return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Unable to allocate memory for filter_entry");
+
+ flow->item_parser = parseitem;
+ flow->dev = dev;
+
+ ret = cxgbe_flow_parse(flow, attr, item, action, e);
+ if (ret) {
+ t4_os_free(flow);
+ return ret;
+ }
+
+ if (validate_filter(adap, &flow->fs)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "validation failed. Check f/w config file.");
+ }
+
+ if (cxgbe_get_fidx(flow, &fidx)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "no memory in tcam.");
+ }
+
+ if (cxgbe_verify_fidx(flow, fidx, 0)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "validation failed");
+ }
+
+ t4_os_free(flow);
+ return 0;
+}
+
+/*
+ * @ret : == 0 filter destroyed successfully
+ *         < 0 error destroying filter
+ *        == 1 filter not active / not found
+ */
+static int
+cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
+ struct rte_flow_error *e)
+{
+ if (f && (f->valid || f->pending) &&
+ f->dev == dev && /* Only if user has asked for this port */
+ f->private) /* We (rte_flow) created this filter */
+ return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
+ e);
+ return 1;
+}
+
+static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ unsigned int i;
+ int ret = 0;
+
+ if (adap->tids.ftid_tab) {
+ struct filter_entry *f = &adap->tids.ftid_tab[0];
+
+ for (i = 0; i < adap->tids.nftids; i++, f++) {
+ ret = cxgbe_check_n_destroy(f, dev, e);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ if (is_hashfilter(adap) && adap->tids.tid_tab) {
+ struct filter_entry *f;
+
+ for (i = adap->tids.hash_base; i < adap->tids.ntids; i++) {
+ f = (struct filter_entry *)adap->tids.tid_tab[i];
+
+ ret = cxgbe_check_n_destroy(f, dev, e);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+out:
+ return ret >= 0 ? 0 : ret;
+}
+
+static const struct rte_flow_ops cxgbe_flow_ops = {
+ .validate = cxgbe_flow_validate,
+ .create = cxgbe_flow_create,
+ .destroy = cxgbe_flow_destroy,
+ .flush = cxgbe_flow_flush,
+ .query = cxgbe_flow_query,
+ .isolate = NULL,
+};
+
+int
+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ RTE_SET_USED(dev);
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &cxgbe_flow_ops;
+ break;
+ default:
+ ret = -ENOTSUP;
+ break;
+ }
+ return ret;
+}
diff --git a/drivers/net/cxgbe/cxgbe_flow.h b/drivers/net/cxgbe/cxgbe_flow.h
new file mode 100644
index 00000000..0f750474
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_flow.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#ifndef _CXGBE_FLOW_H_
+#define _CXGBE_FLOW_H_
+
+#include <rte_flow_driver.h>
+#include "cxgbe_filter.h"
+#include "cxgbe.h"
+
+#define CXGBE_FLOW_POLL_US 10
+#define CXGBE_FLOW_POLL_CNT 10
+
+struct chrte_fparse {
+ int (*fptr)(const void *mask, /* currently supported mask */
+ const struct rte_flow_item *item, /* user input */
+ struct ch_filter_specification *fs, /* where to parse */
+ struct rte_flow_error *e);
+ const void *dmask; /* Specify what is supported by chelsio by default*/
+};
+
+struct rte_flow {
+ struct filter_entry *f;
+ struct ch_filter_specification fs; /* temp, to create filter */
+ struct chrte_fparse *item_parser;
+ /*
+ * filter_entry doesn't store user priority.
+ * Post creation of filter this will indicate the
+ * flow index (fidx) for both hash and tcam filters
+ */
+ unsigned int fidx;
+ struct rte_eth_dev *dev;
+};
+
+int
+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+
+#endif /* _CXGBE_FLOW_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 54eb23df..c3938e8d 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -37,6 +37,23 @@
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgbe.h"
+#include "clip_tbl.h"
+
+/**
+ * Allocate a chunk of memory. The allocated memory is cleared.
+ */
+void *t4_alloc_mem(size_t size)
+{
+ return rte_zmalloc(NULL, size, 0);
+}
+
+/**
+ * Free memory allocated through t4_alloc_mem().
+ */
+void t4_free_mem(void *addr)
+{
+ rte_free(addr);
+}
/*
* Response queue handler for the FW event queue.
@@ -70,6 +87,18 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_fw6_msg *msg = (const void *)rsp;
t4_handle_fw_rpl(q->adapter, msg->data);
+ } else if (opcode == CPL_ABORT_RPL_RSS) {
+ const struct cpl_abort_rpl_rss *p = (const void *)rsp;
+
+ hash_del_filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_SET_TCB_RPL) {
+ const struct cpl_set_tcb_rpl *p = (const void *)rsp;
+
+ filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_ACT_OPEN_RPL) {
+ const struct cpl_act_open_rpl *p = (const void *)rsp;
+
+ hash_filter_rpl(q->adapter, p);
} else {
dev_err(adapter, "unexpected CPL %#x on FW event queue\n",
opcode);
@@ -78,6 +107,79 @@ out:
return 0;
}
+/**
+ * Setup sge control queues to pass control information.
+ */
+int setup_sge_ctrl_txq(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int err = 0, i = 0;
+
+ for_each_port(adapter, i) {
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct sge_ctrl_txq *q = &s->ctrlq[i];
+
+ q->q.size = 1024;
+ err = t4_sge_alloc_ctrl_txq(adapter, q,
+ adapter->eth_dev, i,
+ s->fw_evtq.cntxt_id,
+ rte_socket_id());
+ if (err) {
+ dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
+ err);
+ goto out;
+ }
+ snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
+ q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
+ RTE_CACHE_LINE_SIZE,
+ RTE_MBUF_PRIV_ALIGN,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ SOCKET_ID_ANY);
+ if (!q->mb_pool) {
+ dev_err(adapter, "Can't create ctrl pool for port: %d",
+ i);
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ return 0;
+out:
+ t4_free_sge_resources(adapter);
+ return err;
+}
+
+/**
+ * cxgbe_poll_for_completion: Poll rxq for completion
+ * @q: rxq to poll
+ * @us: microseconds to delay
+ * @cnt: number of times to poll
+ * @c: completion to check for 'done' status
+ *
+ * Polls the rxq for replies until the completion is done or the count
+ * expires.
+ */
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt, struct t4_completion *c)
+{
+ unsigned int i;
+ unsigned int work_done, budget = 4;
+
+ if (!c)
+ return -EINVAL;
+
+ for (i = 0; i < cnt; i++) {
+ cxgbe_poll(q, NULL, budget, &work_done);
+ t4_os_lock(&c->lock);
+ if (c->done) {
+ t4_os_unlock(&c->lock);
+ return 0;
+ }
+ t4_os_unlock(&c->lock);
+ udelay(us);
+ }
+ return -ETIMEDOUT;
+}
+
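The expected calling pattern pairs this poller with a filter_ctx completion, as the flow code does; a condensed sketch using names from this patch:

    struct filter_ctx ctx;
    int err;

    t4_init_completion(&ctx.completion);
    err = cxgbe_set_filter(dev, fidx, fs, &ctx);
    if (!err)
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
                                        CXGBE_FLOW_POLL_US,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);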
int setup_sge_fwevtq(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
@@ -169,6 +271,174 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
return 0;
}
+/**
+ * Allocate an active-open TID and set it to the supplied value.
+ */
+int cxgbe_alloc_atid(struct tid_info *t, void *data)
+{
+ int atid = -1;
+
+ t4_os_lock(&t->atid_lock);
+ if (t->afree) {
+ union aopen_entry *p = t->afree;
+
+ atid = p - t->atid_tab;
+ t->afree = p->next;
+ p->data = data;
+ t->atids_in_use++;
+ }
+ t4_os_unlock(&t->atid_lock);
+ return atid;
+}
+
+/**
+ * Release an active-open TID.
+ */
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
+{
+ union aopen_entry *p = &t->atid_tab[atid];
+
+ t4_os_lock(&t->atid_lock);
+ p->next = t->afree;
+ t->afree = p;
+ t->atids_in_use--;
+ t4_os_unlock(&t->atid_lock);
+}
+
+/**
+ * Populate a TID_RELEASE WR. Caller must properly size the skb.
+ */
+static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
+{
+ struct cpl_tid_release *req;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
+ INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
+}
+
+/**
+ * Release a TID and inform HW. If we are unable to allocate the release
+ * message we defer to a work queue.
+ */
+void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
+ unsigned short family)
+{
+ struct rte_mbuf *mbuf;
+ struct adapter *adap = container_of(t, struct adapter, tids);
+
+ WARN_ON(tid >= t->ntids);
+
+ if (t->tid_tab[tid]) {
+ t->tid_tab[tid] = NULL;
+ rte_atomic32_dec(&t->conns_in_use);
+ if (t->hash_base && tid >= t->hash_base) {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_dec(&t->hash_tids_in_use);
+ } else {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_dec(&t->tids_in_use);
+ }
+ }
+
+ mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
+ if (mbuf) {
+ mbuf->data_len = sizeof(struct cpl_tid_release);
+ mbuf->pkt_len = mbuf->data_len;
+ mk_tid_release(mbuf, tid);
+ t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);
+ }
+}
+
+/**
+ * Insert a TID.
+ */
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+ unsigned short family)
+{
+ t->tid_tab[tid] = data;
+ if (t->hash_base && tid >= t->hash_base) {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_inc(&t->hash_tids_in_use);
+ } else {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_inc(&t->tids_in_use);
+ }
+
+ rte_atomic32_inc(&t->conns_in_use);
+}
+
+/**
+ * Free TID tables.
+ */
+static void tid_free(struct tid_info *t)
+{
+ if (t->tid_tab) {
+ if (t->ftid_bmap)
+ rte_bitmap_free(t->ftid_bmap);
+
+ if (t->ftid_bmap_array)
+ t4_os_free(t->ftid_bmap_array);
+
+ t4_os_free(t->tid_tab);
+ }
+
+ memset(t, 0, sizeof(struct tid_info));
+}
+
+/**
+ * Allocate and initialize the TID tables. Returns 0 on success.
+ */
+static int tid_init(struct tid_info *t)
+{
+ size_t size;
+ unsigned int ftid_bmap_size;
+ unsigned int natids = t->natids;
+ unsigned int max_ftids = t->nftids;
+
+ ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
+ size = t->ntids * sizeof(*t->tid_tab) +
+ max_ftids * sizeof(*t->ftid_tab) +
+ natids * sizeof(*t->atid_tab);
+
+ t->tid_tab = t4_os_alloc(size);
+ if (!t->tid_tab)
+ return -ENOMEM;
+
+ t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+ t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids];
+ t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
+ if (!t->ftid_bmap_array) {
+ tid_free(t);
+ return -ENOMEM;
+ }
+
+ t4_os_lock_init(&t->atid_lock);
+ t4_os_lock_init(&t->ftid_lock);
+
+ t->afree = NULL;
+ t->atids_in_use = 0;
+ rte_atomic32_init(&t->tids_in_use);
+ rte_atomic32_set(&t->tids_in_use, 0);
+ rte_atomic32_init(&t->conns_in_use);
+ rte_atomic32_set(&t->conns_in_use, 0);
+
+ /* Setup the free list for atid_tab and clear the stid bitmap. */
+ if (natids) {
+ while (--natids)
+ t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+ t->afree = t->atid_tab;
+ }
+
+ t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
+ ftid_bmap_size);
+ if (!t->ftid_bmap) {
+ tid_free(t);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
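tid_init() carves one allocation into three consecutive regions; a sketch of the resulting layout (with ftid_tab placed after the atid region):

    /*
     * tid_tab[0 .. ntids-1]     void *         per-TID data pointers
     * atid_tab[0 .. natids-1]   aopen_entry    active-open TID free list
     * ftid_tab[0 .. nftids-1]   filter_entry   LE-TCAM filter shadow copies
     */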
static inline bool is_x_1g_port(const struct link_config *lc)
{
return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
@@ -243,7 +513,7 @@ void cfg_queues(struct rte_eth_dev *eth_dev)
* We default up to # of cores queues per 1G/10G port.
*/
if (nb_ports)
- q_per_port = (MAX_ETH_QSETS -
+ q_per_port = (s->max_ethqsets -
(adap->params.nports - nb_ports)) /
nb_ports;
@@ -267,8 +537,6 @@ void cfg_queues(struct rte_eth_dev *eth_dev)
qidx += pi->n_rx_qsets;
}
- s->max_ethqsets = qidx;
-
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
@@ -500,6 +768,40 @@ static void configure_pcie_ext_tag(struct adapter *adapter)
}
}
+/* Figure out how many Queue Sets we can support */
+void configure_max_ethqsets(struct adapter *adapter)
+{
+ unsigned int ethqsets;
+
+ /*
+ * We need to reserve an Ingress Queue for the Asynchronous Firmware
+ * Event Queue.
+ *
+ * For each Queue Set, we'll need the ability to allocate two Egress
+ * Contexts -- one for the Ingress Queue Free List and one for the TX
+ * Ethernet Queue.
+ */
+ if (is_pf4(adapter)) {
+ struct pf_resources *pfres = &adapter->params.pfres;
+
+ ethqsets = pfres->niqflint - 1;
+ if (pfres->neq < ethqsets * 2)
+ ethqsets = pfres->neq / 2;
+ } else {
+ struct vf_resources *vfres = &adapter->params.vfres;
+
+ ethqsets = vfres->niqflint - 1;
+ if (vfres->nethctrl != ethqsets)
+ ethqsets = min(vfres->nethctrl, ethqsets);
+ if (vfres->neq < ethqsets * 2)
+ ethqsets = vfres->neq / 2;
+ }
+
+ if (ethqsets > MAX_ETH_QSETS)
+ ethqsets = MAX_ETH_QSETS;
+ adapter->sge.max_ethqsets = ethqsets;
+}
+
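Editor's note: to make the reservation arithmetic in configure_max_ethqsets() concrete, here is a small worked sketch with hypothetical provisioning numbers (niqflint = 17, neq = 24, and an illustrative MAX_ETH_QSETS): one ingress queue is reserved for the firmware event queue, and each remaining queue set needs two egress contexts.

    #include <stdio.h>

    #define MAX_ETH_QSETS 64 /* hypothetical cap for this sketch */

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        /* Hypothetical PF provisioning: 17 ingress queues with free
         * lists, 24 egress contexts.
         */
        unsigned int niqflint = 17, neq = 24;

        unsigned int ethqsets = niqflint - 1;  /* reserve FW event queue */
        if (neq < ethqsets * 2)                /* 2 EQs per queue set */
            ethqsets = neq / 2;
        ethqsets = min_u(ethqsets, MAX_ETH_QSETS);

        printf("queue sets supported: %u\n", ethqsets); /* prints 12 */
        return 0;
    }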
/*
* Tweak configuration based on system architecture, etc. Most of these have
* defaults assigned to them by Firmware Configuration Files (if we're using
@@ -638,8 +940,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* This will allow the firmware to optimize aspects of the hardware
* configuration which will result in improved performance.
*/
- caps_cmd.niccaps &= cpu_to_be16(~(FW_CAPS_CONFIG_NIC_HASHFILTER |
- FW_CAPS_CONFIG_NIC_ETHOFLD));
+ caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
caps_cmd.toecaps = 0;
caps_cmd.iscsicaps = 0;
caps_cmd.rdmacaps = 0;
@@ -706,6 +1007,7 @@ bye:
static int adap_init0(struct adapter *adap)
{
+ struct fw_caps_config_cmd caps_cmd;
int ret = 0;
u32 v, port_vec;
enum dev_state state;
@@ -781,6 +1083,17 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
+ /* Now that we've successfully configured and initialized the adapter
+ * (or found it already initialized), we can ask the Firmware what
+ * resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning info\n");
+ goto bye;
+ }
+
/* Find out what ports are available to us. */
v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
@@ -822,6 +1135,50 @@ static int adap_init0(struct adapter *adap)
V_FW_PARAMS_PARAM_Y(0) | \
V_FW_PARAMS_PARAM_Z(0))
+ params[0] = FW_PARAM_PFVF(FILTER_START);
+ params[1] = FW_PARAM_PFVF(FILTER_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ftid_base = val[0];
+ adap->tids.nftids = val[1] - val[0] + 1;
+
+ params[0] = FW_PARAM_PFVF(CLIP_START);
+ params[1] = FW_PARAM_PFVF(CLIP_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->clipt_start = val[0];
+ adap->clipt_end = val[1];
+
+ /*
+ * Get device capabilities so we can determine what resources we need
+ * to manage.
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
+ &caps_cmd);
+ if (ret < 0)
+ goto bye;
+
+ if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
+ is_t6(adap->params.chip)) {
+ if (init_hash_filter(adap) < 0)
+ goto bye;
+ }
+
+ /* query tid-related parameters */
+ params[0] = FW_PARAM_DEV(NTID);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ params, val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+
/* If we're running on newer firmware, let it know that we're
* prepared to deal with encapsulated CPL messages. Older
* firmware won't understand this and we'll just get
@@ -887,6 +1244,7 @@ static int adap_init0(struct adapter *adap)
t4_init_tp_params(adap);
configure_pcie_ext_tag(adap);
configure_vlan_types(adap);
+ configure_max_ethqsets(adap);
adap->params.drv_memwin = MEMWIN_NIC;
adap->flags |= FW_OK;
@@ -1027,7 +1385,7 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
if (rss_hf & ~CXGBE_RSS_HF_ALL)
return -EINVAL;
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
@@ -1037,14 +1395,16 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
F_FW_RSS_VI_CONFIG_CMD_UDPEN;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & CXGBE_RSS_HF_IPV6_MASK)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
- flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+ if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
- flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
+ if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
F_FW_RSS_VI_CONFIG_CMD_UDPEN;
rxq = &adapter->sge.ethrxq[pi->first_qset];
@@ -1259,6 +1619,30 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
}
/**
+ * cxgbe_set_link_status - Set device link up or down.
+ * @pi: Underlying port's info
+ * @status: 0 - down, 1 - up
+ *
+ * Set the device link up or down.
+ */
+int cxgbe_set_link_status(struct port_info *pi, bool status)
+{
+ struct adapter *adapter = pi->adapter;
+ int err = 0;
+
+ err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status);
+ if (err) {
+ dev_err(adapter, "%s: t4_enable_vi failed: %d\n", __func__, err);
+ return err;
+ }
+
+ if (!status)
+ t4_reset_link_config(adapter, pi->pidx);
+
+ return 0;
+}
+
+/**
* cxgb_up - enable the adapter
* @adap: adapter being enabled
*
@@ -1283,17 +1667,7 @@ int cxgbe_up(struct adapter *adap)
*/
int cxgbe_down(struct port_info *pi)
{
- struct adapter *adapter = pi->adapter;
- int err = 0;
-
- err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false);
- if (err) {
- dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
- return err;
- }
-
- t4_reset_link_config(adapter, pi->pidx);
- return 0;
+ return cxgbe_set_link_status(pi, false);
}
/*
@@ -1307,6 +1681,8 @@ void cxgbe_close(struct adapter *adapter)
if (adapter->flags & FULL_INIT_DONE) {
if (is_pf4(adapter))
t4_intr_disable(adapter);
+ tid_free(&adapter->tids);
+ t4_cleanup_clip_tbl(adapter);
t4_sge_tx_monitor_stop(adapter);
t4_free_sge_resources(adapter);
for_each_port(adapter, i) {
@@ -1350,6 +1726,7 @@ int cxgbe_probe(struct adapter *adapter)
t4_os_lock_init(&adapter->mbox_lock);
TAILQ_INIT(&adapter->mbox_list);
+ t4_os_lock_init(&adapter->win0_lock);
err = t4_prep_adapter(adapter);
if (err)
@@ -1469,6 +1846,35 @@ allocate_mac:
print_adapter_info(adapter);
print_port_info(adapter);
+ adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+ adapter->clipt_end);
+ if (!adapter->clipt) {
+ /* We tolerate a lack of clip_table, giving up some
+ * functionality.
+ */
+ dev_warn(adapter, "could not allocate CLIP. Continuing\n");
+ }
+
+ if (tid_init(&adapter->tids) < 0) {
+ /* Disable filtering support */
+ dev_warn(adapter, "could not allocate TID table, "
+ "filter support disabled. Continuing\n");
+ }
+
+ if (is_hashfilter(adapter)) {
+ if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
+ u32 hash_base, hash_reg;
+
+ hash_reg = A_LE_DB_TID_HASHBASE;
+ hash_base = t4_read_reg(adapter, hash_reg);
+ adapter->tids.hash_base = hash_base / 4;
+ }
+ } else {
+ /* Disable hash filtering support */
+ dev_warn(adapter,
+ "Maskless filter support disabled. Continuing\n");
+ }
+
err = init_rss(adapter);
if (err)
goto out_free;
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
new file mode 100644
index 00000000..50931ed0
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_ofld.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_OFLD_H_
+#define _CXGBE_OFLD_H_
+
+#include <rte_bitmap.h>
+
+#include "cxgbe_filter.h"
+
+#define INIT_TP_WR(w, tid) do { \
+ (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_TP_WR) | \
+ V_FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
+ (w)->wr.wr_mid = cpu_to_be32( \
+ V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
+ V_FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+#define INIT_TP_WR_MIT_CPL(w, cpl, tid) do { \
+ INIT_TP_WR(w, tid); \
+ OPCODE_TID(w) = cpu_to_be32(MK_OPCODE_TID(cpl, tid)); \
+} while (0)
+
+#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
+ (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) | \
+ V_FW_WR_ATOMIC(atomic)); \
+ (w)->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
+ V_FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+/*
+ * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
+ */
+#define MAX_ATIDS 8192U
+
+union aopen_entry {
+ void *data;
+ union aopen_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc. of the TID and
+ * filter TID tables.
+ * The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+ void **tid_tab;
+ unsigned int ntids;
+ struct filter_entry *ftid_tab; /* Normal filters */
+ union aopen_entry *atid_tab;
+ struct rte_bitmap *ftid_bmap;
+ uint8_t *ftid_bmap_array;
+ unsigned int nftids, natids;
+ unsigned int ftid_base, hash_base;
+
+ union aopen_entry *afree;
+ unsigned int atids_in_use;
+
+ /* TIDs in the TCAM */
+ rte_atomic32_t tids_in_use;
+ /* TIDs in the HASH */
+ rte_atomic32_t hash_tids_in_use;
+ rte_atomic32_t conns_in_use;
+
+ rte_spinlock_t atid_lock __rte_cache_aligned;
+ rte_spinlock_t ftid_lock;
+};
+
+static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
+{
+ return tid < t->ntids ? t->tid_tab[tid] : NULL;
+}
+
+static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
+{
+ return atid < t->natids ? t->atid_tab[atid].data : NULL;
+}
+
+int cxgbe_alloc_atid(struct tid_info *t, void *data);
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid);
+void cxgbe_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
+ unsigned short family);
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+ unsigned short family);
+
+#endif /* _CXGBE_OFLD_H_ */
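Editor's note: as a quick illustration of how the lookup helpers pair with cxgbe_insert_tid()/cxgbe_remove_tid(), here is a self-contained sketch that models only the tid_tab slot bookkeeping; the firmware release message and the atomic usage counters are omitted, and the types are simplified stand-ins.

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal stand-in for struct tid_info, omitting firmware plumbing. */
    struct tid_info_s {
        void **tid_tab;
        unsigned int ntids;
    };

    static void *lookup_tid_s(const struct tid_info_s *t, unsigned int tid)
    {
        return tid < t->ntids ? t->tid_tab[tid] : NULL;
    }

    int main(void)
    {
        struct tid_info_s t = { calloc(16, sizeof(void *)), 16 };
        int conn = 42;

        t.tid_tab[5] = &conn;        /* cxgbe_insert_tid() core effect */
        printf("tid 5 -> %p\n", lookup_tid_s(&t, 5));
        printf("tid 99 -> %p\n", lookup_tid_s(&t, 99)); /* out of range */
        t.tid_tab[5] = NULL;         /* cxgbe_remove_tid() core effect */
        free(t.tid_tab);
        return 0;
    }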
diff --git a/drivers/net/cxgbe/cxgbe_pfvf.h b/drivers/net/cxgbe/cxgbe_pfvf.h
index 2bba9742..8d0a105a 100644
--- a/drivers/net/cxgbe/cxgbe_pfvf.h
+++ b/drivers/net/cxgbe/cxgbe_pfvf.h
@@ -35,6 +35,8 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
int cxgbe_dev_start(struct rte_eth_dev *eth_dev);
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
int wait_to_complete);
+int cxgbe_dev_set_link_up(struct rte_eth_dev *dev);
+int cxgbe_dev_set_link_down(struct rte_eth_dev *dev);
uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
diff --git a/drivers/net/cxgbe/cxgbevf_ethdev.c b/drivers/net/cxgbe/cxgbevf_ethdev.c
index a942ba6b..3b32ca9d 100644
--- a/drivers/net/cxgbe/cxgbevf_ethdev.c
+++ b/drivers/net/cxgbe/cxgbevf_ethdev.c
@@ -50,6 +50,7 @@ static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev,
/* TX Stats */
eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames +
ps.tx_ucast_frames;
+ eth_stats->obytes = ps.tx_octets;
eth_stats->oerrors = ps.tx_drop;
for (i = 0; i < pi->n_rx_qsets; i++) {
@@ -85,6 +86,8 @@ static const struct eth_dev_ops cxgbevf_eth_dev_ops = {
.dev_infos_get = cxgbe_dev_info_get,
.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
.link_update = cxgbe_dev_link_update,
+ .dev_set_link_up = cxgbe_dev_set_link_up,
+ .dev_set_link_down = cxgbe_dev_set_link_down,
.mtu_set = cxgbe_dev_mtu_set,
.tx_queue_setup = cxgbe_dev_tx_queue_setup,
.tx_queue_start = cxgbe_dev_tx_queue_start,
diff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c
index 5b3fb539..4214d031 100644
--- a/drivers/net/cxgbe/cxgbevf_main.c
+++ b/drivers/net/cxgbe/cxgbevf_main.c
@@ -20,7 +20,7 @@
static void size_nports_qsets(struct adapter *adapter)
{
struct vf_resources *vfres = &adapter->params.vfres;
- unsigned int ethqsets, pmask_nports;
+ unsigned int pmask_nports;
/*
* The number of "ports" which we support is equal to the number of
@@ -49,23 +49,7 @@ static void size_nports_qsets(struct adapter *adapter)
adapter->params.nports = pmask_nports;
}
- /*
- * We need to reserve an Ingress Queue for the Asynchronous Firmware
- * Event Queue.
- *
- * For each Queue Set, we'll need the ability to allocate two Egress
- * Contexts -- one for the Ingress Queue Free List and one for the TX
- * Ethernet Queue.
- */
- ethqsets = vfres->niqflint - 1;
- if (vfres->nethctrl != ethqsets)
- ethqsets = min(vfres->nethctrl, ethqsets);
- if (vfres->neq < ethqsets * 2)
- ethqsets = vfres->neq / 2;
- if (ethqsets > MAX_ETH_QSETS)
- ethqsets = MAX_ETH_QSETS;
- adapter->sge.max_ethqsets = ethqsets;
-
+ configure_max_ethqsets(adapter);
if (adapter->sge.max_ethqsets < adapter->params.nports) {
dev_warn(adapter->pdev_dev, "only using %d of %d available"
" virtual interfaces (too few Queue Sets)\n",
diff --git a/drivers/net/cxgbe/meson.build b/drivers/net/cxgbe/meson.build
new file mode 100644
index 00000000..7c69a34b
--- /dev/null
+++ b/drivers/net/cxgbe/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('cxgbe_ethdev.c',
+ 'cxgbe_main.c',
+ 'cxgbevf_ethdev.c',
+ 'cxgbevf_main.c',
+ 'sge.c',
+ 'cxgbe_filter.c',
+ 'cxgbe_flow.c',
+ 'clip_tbl.c',
+ 'base/t4_hw.c',
+ 'base/t4vf_hw.c')
+includes += include_directories('base')
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index b5d3611d..4ea40d19 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -55,6 +55,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
#define MAX_IMM_TX_PKT_LEN 256
/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
+/*
* Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
* per mbuf buffer). We currently only support two sizes for 1500- and
* 9000-byte MTUs. We could easily support more but there doesn't seem to be
@@ -1300,6 +1305,126 @@ out_free:
}
/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any mbufs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
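Editor's note: the signed subtraction in reclaim_completed_tx_imm() handles the hardware consumer index wrapping past the end of the ring. A tiny standalone check with hypothetical indices:

    #include <stdio.h>

    /* Hypothetical ring state: size 512, SW cidx near the end,
     * HW cidx already wrapped.
     */
    int main(void)
    {
        int size = 512, sw_cidx = 500, hw_cidx = 10;

        int reclaim = hw_cidx - sw_cidx; /* -490: HW index wrapped */
        if (reclaim < 0)
            reclaim += size;             /* 22 descriptors completed */

        printf("reclaimed %d descriptors\n", reclaim);
        return 0;
    }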
+/**
+ * is_imm - check whether a packet can be sent as immediate data
+ * @mbuf: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct rte_mbuf *mbuf)
+{
+ return mbuf->pkt_len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ * inline_tx_mbuf - inline a packet's data into TX descriptors
+ * @q: the TX queue where the packet will be inlined
+ * @from: pointer to data portion of packet
+ * @to: pointer after cpl where data has to be inlined
+ * @len: length of data to inline
+ *
+ * Inline a packet's contents directly to TX descriptors, starting at
+ * the given position within the TX DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to,
+ int len)
+{
+ int left = RTE_PTR_DIFF(q->stat, *to);
+
+ if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) {
+ rte_memcpy(*to, from, len);
+ *to = RTE_PTR_ADD(*to, len);
+ } else {
+ rte_memcpy(*to, from, left);
+ from = RTE_PTR_ADD(from, left);
+ left = len - left;
+ rte_memcpy((void *)q->desc, from, left);
+ *to = RTE_PTR_ADD((void *)q->desc, left);
+ }
+}
+
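Editor's note: the same split-copy idea in isolation: a sketch that copies into a byte ring and wraps to the base, mirroring the q->stat/q->desc boundary handling above (simplified, with plain memcpy in place of rte_memcpy).

    #include <stdio.h>
    #include <string.h>

    /* Copy `len` bytes into a ring of `ring_size` bytes starting at
     * `pos`, wrapping to the ring base when the end is reached, the
     * same split memcpy that inline_tx_mbuf() performs.
     */
    static void ring_copy(char *ring, size_t ring_size, size_t pos,
                          const char *src, size_t len)
    {
        size_t left = ring_size - pos;

        if (len <= left) {
            memcpy(ring + pos, src, len);
        } else {
            memcpy(ring + pos, src, left);
            memcpy(ring, src + left, len - left); /* wrapped tail */
        }
    }

    int main(void)
    {
        char ring[8] = "........";
        ring_copy(ring, sizeof(ring), 6, "ABCD", 4); /* wraps after 2 */
        printf("%.8s\n", ring);                      /* "CD....AB" */
        return 0;
    }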
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @q: the control queue
+ * @mbuf: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ unsigned int ndesc;
+ struct fw_wr_hdr *wr;
+ caddr_t dst;
+
+ if (unlikely(!is_imm(mbuf))) {
+ WARN_ON(1);
+ rte_pktmbuf_free(mbuf);
+ return -1;
+ }
+
+ reclaim_completed_tx_imm(&q->q);
+ ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));
+ t4_os_lock(&q->ctrlq_lock);
+
+ q->full = txq_avail(&q->q) < ndesc ? 1 : 0;
+ if (unlikely(q->full)) {
+ t4_os_unlock(&q->ctrlq_lock);
+ return -1;
+ }
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ dst = (void *)wr;
+ inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),
+ &dst, mbuf->data_len);
+
+ txq_advance(&q->q, ndesc);
+ if (unlikely(txq_avail(&q->q) < 64))
+ wr->lo |= htonl(F_FW_WR_EQUEQ);
+
+ q->txp++;
+
+ ring_tx_db(q->adapter, &q->q);
+ t4_os_unlock(&q->ctrlq_lock);
+
+ rte_pktmbuf_free(mbuf);
+ return 0;
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @q: the control queue
+ * @mbuf: the packet containing the management message
+ *
+ * Send a management message through the control queue.
+ */
+int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ return ctrl_xmit(q, mbuf);
+}
+
+/**
* alloc_ring - allocate resources for an SGE descriptor ring
* @dev: the PCI device's core device
* @nelem: the number of descriptors
@@ -1764,12 +1889,16 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
F_FW_CMD_WRITE | F_FW_CMD_EXEC);
if (is_pf4(adap)) {
- pciechan = cong > 0 ? cxgbe_ffs(cong) - 1 : pi->tx_chan;
+ pciechan = pi->tx_chan;
c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
V_FW_IQ_CMD_VFN(0));
if (cong >= 0)
- c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
- F_FW_IQ_CMD_IQRO);
+ c.iqns_to_fl0congen =
+ htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+ V_FW_IQ_CMD_IQTYPE(cong ?
+ FW_IQ_IQTYPE_NIC :
+ FW_IQ_IQTYPE_OFLD) |
+ F_FW_IQ_CMD_IQRO);
} else {
pciechan = pi->port_id;
}
@@ -2080,6 +2209,64 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
return 0;
}
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id)
+{
+ int ret, nentries;
+ struct fw_eq_ctrl_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+
+ /* Add status entries */
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ eth_dev->device->driver->name, "ctrl_tx_ring",
+ eth_dev->data->port_id, queue_id);
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+ 0, &txq->q.phys_addr,
+ NULL, 0, queue_id,
+ socket_id, z_name, z_name_sw);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_EQ_CTRL_CMD_PFN(adap->pf) |
+ V_FW_EQ_CTRL_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC |
+ F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16));
+ c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0));
+ c.physeqid_pkd = htonl(0);
+ c.fetchszm_to_iqid =
+ htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+ F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid));
+ c.dcaen_to_eqsize =
+ htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+ V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_CTRL_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret) {
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)),
+ G_FW_EQ_CTRL_CMD_EQID(ntohl(c.physeqid_pkd)));
+ txq->adapter = adap;
+ txq->full = 0;
+ return 0;
+}
+
static void free_txq(struct sge_txq *q)
{
q->cntxt_id = 0;
@@ -2174,7 +2361,7 @@ void t4_sge_tx_monitor_stop(struct adapter *adap)
*/
void t4_free_sge_resources(struct adapter *adap)
{
- int i;
+ unsigned int i;
struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
@@ -2191,6 +2378,18 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
+ /* clean up control Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+ if (cq->q.desc) {
+ reclaim_completed_tx_imm(&cq->q);
+ t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
+ cq->q.cntxt_id);
+ free_txq(&cq->q);
+ }
+ }
+
if (adap->sge.fw_evtq.desc)
free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
}
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index d014a11a..7a950ac0 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -74,6 +74,7 @@ static uint64_t dev_tx_offloads_nodis =
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
+static int default_q; /* use default queue - FMC is not executed */
/* At present we only allow up to 4 push mode queues as default - as each of
* this queue need dedicated portal and we are short of portals.
*/
@@ -516,7 +517,15 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
PMD_INIT_FUNC_TRACE();
- DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
+ if (queue_idx >= dev->data->nb_rx_queues) {
+ rte_errno = EOVERFLOW;
+ DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, queue_idx, dev->data->nb_rx_queues);
+ return -rte_errno;
+ }
+
+ DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
+ queue_idx, rxq->fqid);
if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
struct fman_if_ic_params icp;
@@ -580,9 +589,11 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
}
ret = qman_init_fq(rxq, flags, &opts);
- if (ret)
- DPAA_PMD_ERR("Channel/Queue association failed. fqid %d"
- " ret: %d", rxq->fqid, ret);
+ if (ret) {
+ DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
+ "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
+ return ret;
+ }
rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
rxq->is_static = true;
@@ -606,7 +617,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return 0;
}
-int __rte_experimental
+int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
int eth_rx_queue_id,
u16 ch_id,
@@ -657,8 +668,8 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
ret = qman_init_fq(rxq, flags, &opts);
if (ret) {
- DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d",
- rxq->fqid, ret);
+ DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
+ "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
return ret;
}
@@ -669,7 +680,7 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
return ret;
}
-int __rte_experimental
+int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
int eth_rx_queue_id)
{
@@ -715,7 +726,15 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
PMD_INIT_FUNC_TRACE();
- DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
+ if (queue_idx >= dev->data->nb_tx_queues) {
+ rte_errno = EOVERFLOW;
+ DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, queue_idx, dev->data->nb_tx_queues);
+ return -rte_errno;
+ }
+
+ DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
+ queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
return 0;
}
@@ -937,7 +956,7 @@ is_dpaa_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &rte_dpaa_pmd);
}
-int __rte_experimental
+int
rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
{
struct rte_eth_dev *dev;
@@ -1008,15 +1027,15 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
ret = qman_reserve_fqid(fqid);
if (ret) {
- DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
+ DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
fqid, ret);
return -EINVAL;
}
- DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
+ DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
if (ret) {
- DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
+ DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
fqid, ret);
return ret;
}
@@ -1032,7 +1051,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
&cgr_opts);
if (ret) {
DPAA_PMD_WARN(
- "rx taildrop init fail on rx fqid %d (ret=%d)",
+ "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
fqid, ret);
goto without_cgr;
}
@@ -1043,7 +1062,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
without_cgr:
ret = qman_init_fq(fq, flags, &opts);
if (ret)
- DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
+ DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
return ret;
}
@@ -1071,10 +1090,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
/* no tx-confirmation */
opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
- DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
+ DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
if (ret)
- DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
+ DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
return ret;
}
@@ -1145,21 +1164,16 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
dpaa_intf->cfg = cfg;
/* Initialize Rx FQ's */
- if (getenv("DPAA_NUM_RX_QUEUES"))
- num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
- else
+ if (default_q) {
num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
-
- /* if push mode queues to be enabled. Currenly we are allowing only
- * one queue per thread.
- */
- if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
- dpaa_push_mode_max_queue =
- atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
- if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
- dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+ } else {
+ if (getenv("DPAA_NUM_RX_QUEUES"))
+ num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
+ else
+ num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
}
+
/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
* queues.
*/
@@ -1196,8 +1210,11 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
}
for (loop = 0; loop < num_rx_fqs; loop++) {
- fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
- DPAA_PCD_FQID_MULTIPLIER + loop;
+ if (default_q)
+ fqid = cfg->rx_def;
+ else
+ fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+ DPAA_PCD_FQID_MULTIPLIER + loop;
if (dpaa_intf->cgr_rx)
dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
@@ -1372,6 +1389,8 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
if (!eth_dev)
return -ENOMEM;
+ eth_dev->device = &dpaa_dev->device;
+ eth_dev->dev_ops = &dpaa_devops;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -1391,6 +1410,26 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
return ret;
}
+ if (access("/tmp/fmc.bin", F_OK) == -1) {
+ RTE_LOG(INFO, PMD,
+ "* FMC not configured.Enabling default mode\n");
+ default_q = 1;
+ }
+
+ /* disabling the default push mode for LS1043 */
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+ dpaa_push_mode_max_queue = 0;
+
+ /* Check if push mode queues are to be enabled. Currently we allow
+ * only one queue per thread.
+ */
+ if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
+ dpaa_push_mode_max_queue =
+ atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
+ if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
+ dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+ }
+
is_global_init = 1;
}
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 1897b9e4..c79b9f86 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -160,12 +160,14 @@ struct dpaa_if_stats {
uint64_t tund; /**<Tx Undersized */
};
-int __rte_experimental dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
- int eth_rx_queue_id,
+int
+dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
u16 ch_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
-int __rte_experimental dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
+int
+dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
int eth_rx_queue_id);
enum qman_cb_dqrr_result
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 1316d2ad..168b77e4 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -431,7 +431,7 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
}
fd = &dqrr[i]->fd;
- dpaa_intf = fq[i]->dpaa_intf;
+ dpaa_intf = fq[0]->dpaa_intf;
format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
DPAA_FD_FORMAT_SHIFT;
@@ -560,7 +560,8 @@ uint16_t dpaa_eth_queue_rx(void *q,
struct qman_fq *fq = q;
struct qm_dqrr_entry *dq;
uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
- int ret;
+ int num_rx_bufs, ret;
+ uint32_t vdqcr_flags = 0;
if (likely(fq->is_static))
return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);
@@ -573,8 +574,19 @@ uint16_t dpaa_eth_queue_rx(void *q,
}
}
- ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
- DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs);
+ /* For requests of fewer than four buffers, we provide the exact
+ * number of buffers. Otherwise we do not set the QM_VDQCR_EXACT flag.
+ * Without QM_VDQCR_EXACT, the portal can return up to two more buffers
+ * than requested, so we ask for two fewer in that case.
+ */
+ if (nb_bufs < 4) {
+ vdqcr_flags = QM_VDQCR_EXACT;
+ num_rx_bufs = nb_bufs;
+ } else {
+ num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+ (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
+ }
+ ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
if (ret)
return 0;
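Editor's note: a compact sketch of the request-size decision above, with hypothetical values for DPAA_MAX_DEQUEUE_NUM_FRAMES and the QM_VDQCR_EXACT bit (both are illustrative, not the driver's definitions).

    #include <stdio.h>

    #define DPAA_MAX_DEQUEUE_NUM_FRAMES 63 /* hypothetical for sketch */
    #define QM_VDQCR_EXACT 0x1             /* hypothetical flag bit */

    /* Mirror the request-size logic: exact count below 4, otherwise ask
     * for two fewer since the portal may return up to two extra frames.
     */
    static int vdq_request(int nb_bufs, unsigned int *flags)
    {
        *flags = 0;
        if (nb_bufs < 4) {
            *flags = QM_VDQCR_EXACT;
            return nb_bufs;
        }
        return nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                DPAA_MAX_DEQUEUE_NUM_FRAMES - 2 : nb_bufs - 2;
    }

    int main(void)
    {
        unsigned int flags;
        int n;

        n = vdq_request(3, &flags);   /* 3, EXACT set */
        printf("ask %d flags %#x\n", n, flags);
        n = vdq_request(32, &flags);  /* 30, EXACT clear */
        printf("ask %d flags %#x\n", n, flags);
        return 0;
    }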
diff --git a/drivers/net/dpaa/rte_pmd_dpaa.h b/drivers/net/dpaa/rte_pmd_dpaa.h
index 38405ec0..37eea9b0 100644
--- a/drivers/net/dpaa/rte_pmd_dpaa.h
+++ b/drivers/net/dpaa/rte_pmd_dpaa.h
@@ -18,9 +18,6 @@
#include <rte_ethdev_driver.h>
/**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
* Enable/Disable TX loopback
*
* @param port
@@ -33,7 +30,7 @@
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int __rte_experimental
+int
rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on);
#endif /* _PMD_DPAA_H_ */
diff --git a/drivers/net/dpaa/rte_pmd_dpaa_version.map b/drivers/net/dpaa/rte_pmd_dpaa_version.map
index c7ad4030..8cb4500b 100644
--- a/drivers/net/dpaa/rte_pmd_dpaa_version.map
+++ b/drivers/net/dpaa/rte_pmd_dpaa_version.map
@@ -3,10 +3,10 @@ DPDK_17.11 {
local: *;
};
-EXPERIMENTAL {
+DPDK_18.08 {
global:
dpaa_eth_eventq_attach;
dpaa_eth_eventq_detach;
rte_pmd_dpaa_set_tx_loopback;
-};
+} DPDK_17.11;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 9297725d..c5047367 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -2053,9 +2053,7 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = {
RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
-RTE_INIT(dpaa2_pmd_init_log);
-static void
-dpaa2_pmd_init_log(void)
+RTE_INIT(dpaa2_pmd_init_log)
{
dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
if (dpaa2_logtype_pmd >= 0)
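Editor's note: the RTE_INIT change here (and in the e1000 hunks below) folds the forward declaration into the macro invocation; under GCC/Clang it boils down to a constructor function that runs before main(). A hedged sketch of the pattern, where MY_INIT is a stand-in and not the real RTE_INIT definition:

    #include <stdio.h>

    /* Illustrative macro: expands to a function that the runtime calls
     * before main(), which is how the log-type registration runs early.
     */
    #define MY_INIT(func) \
        static void __attribute__((constructor, used)) func(void)

    MY_INIT(demo_init_log)
    {
        printf("registered log type before main()\n");
    }

    int main(void)
    {
        printf("main running\n");
        return 0;
    }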
diff --git a/drivers/net/dpaa2/dpaa2_pmd_logs.h b/drivers/net/dpaa2/dpaa2_pmd_logs.h
index 98a48968..c04babdb 100644
--- a/drivers/net/dpaa2/dpaa2_pmd_logs.h
+++ b/drivers/net/dpaa2/dpaa2_pmd_logs.h
@@ -16,7 +16,7 @@ extern int dpaa2_logtype_pmd;
rte_log(RTE_LOG_DEBUG, dpaa2_logtype_pmd, "dpaa2_net: %s(): "\
fmt "\n", __func__, ##args)
-#define PMD_INIT_FUNC_TRACE() DPAA2_PMD_LOG(DEBUG, " >>")
+#define PMD_INIT_FUNC_TRACE() DPAA2_PMD_DEBUG(">>")
#define DPAA2_PMD_CRIT(fmt, args...) \
DPAA2_PMD_LOG(CRIT, fmt, ## args)
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index dac086d6..ef109a62 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -447,6 +447,12 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
return 0;
}
+/* This function assumes that the caller keeps the same value for nb_pkts
+ * across calls per queue; if that is not the case, it is better to use the
+ * non-prefetch version of the Rx call.
+ * It will return the packets as requested in the previous call, without
+ * honoring the current nb_pkts or bufs space.
+ */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
@@ -454,7 +460,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
struct qbman_result *dq_storage, *dq_storage1 = NULL;
uint32_t fqid = dpaa2_q->fqid;
- int ret, num_rx = 0;
+ int ret, num_rx = 0, pull_size;
uint8_t pending, status;
struct qbman_swp *swp;
const struct qbman_fd *fd, *next_fd;
@@ -470,12 +476,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
}
swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
-
+ pull_size = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_pkts;
if (unlikely(!q_storage->active_dqs)) {
q_storage->toggle = 0;
dq_storage = q_storage->dq_storage[q_storage->toggle];
- q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
- DPAA2_DQRR_RING_SIZE : nb_pkts;
+ q_storage->last_num_pkts = pull_size;
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_numframes(&pulldesc,
q_storage->last_num_pkts);
@@ -514,7 +520,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
q_storage->toggle ^= 1;
dq_storage1 = q_storage->dq_storage[q_storage->toggle];
qbman_pull_desc_clear(&pulldesc);
- qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
+ qbman_pull_desc_set_numframes(&pulldesc, pull_size);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
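Editor's note: the prefetch Rx path ping-pongs between two dequeue-storage buffers so the next hardware pull is already in flight while software drains the previous batch. A stripped-down sketch of that toggle, with the portal interaction reduced to prints:

    #include <stdio.h>

    /* Skeleton of the ping-pong storage pattern: two result buffers,
     * toggled each call, so a new pull can be issued into one buffer
     * while the other is drained.
     */
    struct q_storage_s {
        int toggle;
        const char *dq_storage[2];
    };

    int main(void)
    {
        struct q_storage_s qs = { 0, { "buf A", "buf B" } };
        int call;

        for (call = 0; call < 4; call++) {
            const char *drain = qs.dq_storage[qs.toggle];

            qs.toggle ^= 1; /* issue next pull into the other buffer */
            printf("call %d: drain %s, prefetch into %s\n",
                   call, drain, qs.dq_storage[qs.toggle]);
        }
        return 0;
    }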
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 69cf119c..9f228169 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -198,7 +198,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
- for (i = 0; i < DPNI_MAX_DPBP; i++) {
+ for (i = 0; i < cmd_params->num_dpbp; i++) {
cmd_params->pool[i].dpbp_id =
cpu_to_le16(cfg->pools[i].dpbp_id);
cmd_params->pool[i].priority_mask =
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 7039dc10..053e855b 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -1823,9 +1823,7 @@ RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
/* see e1000_logs.c */
-RTE_INIT(igb_init_log);
-static void
-igb_init_log(void)
+RTE_INIT(igb_init_log)
{
e1000_igb_init_log();
}
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index a6b3e92a..7d2ac4eb 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1364,6 +1364,7 @@ em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER;
if (max_rx_pktlen > ETHER_MAX_LEN)
rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -1458,8 +1459,10 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
@@ -1792,9 +1795,10 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- rxq->crc_len =
- (uint8_t)(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
bus_addr = rxq->rx_ring_phys_addr;
E1000_WRITE_REG(hw, E1000_RDLEN(i),
@@ -1873,10 +1877,10 @@ eth_em_rx_init(struct rte_eth_dev *dev)
}
/* Setup the Receive Control Register. */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)
- rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
- else
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+ else
+ rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
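Editor's note: these hunks replace direct CRC_STRIP flag tests with the rte_eth_dev_must_keep_crc() helper. A sketch of the assumed semantics, that the CRC is kept unless stripping was requested, with hypothetical flag values; must_keep_crc here is an illustration, not the ethdev implementation.

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical flag values for this sketch only. */
    #define DEV_RX_OFFLOAD_CRC_STRIP (1ULL << 0)
    #define DEV_RX_OFFLOAD_KEEP_CRC  (1ULL << 1)
    #define ETHER_CRC_LEN 4

    /* Assumed behavior of the 18.08 helper: CRC stays on the frame
     * unless the application asked for stripping.
     */
    static int must_keep_crc(uint64_t rx_offloads)
    {
        return !(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP);
    }

    int main(void)
    {
        uint64_t offloads = DEV_RX_OFFLOAD_KEEP_CRC;
        uint8_t crc_len = must_keep_crc(offloads) ? ETHER_CRC_LEN : 0;

        printf("crc_len = %u\n", (unsigned)crc_len); /* 4: CRC kept */
        return 0;
    }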
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index edc7be31..64dfe683 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -435,6 +435,9 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
.rx_queue_setup = eth_igb_rx_queue_setup,
.rx_queue_release = eth_igb_rx_queue_release,
+ .rx_descriptor_done = eth_igb_rx_descriptor_done,
+ .rx_descriptor_status = eth_igb_rx_descriptor_status,
+ .tx_descriptor_status = eth_igb_tx_descriptor_status,
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
@@ -3194,12 +3197,12 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+ if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
#else
- if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+ if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
}
@@ -5683,9 +5686,7 @@ RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
/* see e1000_logs.c */
-RTE_INIT(e1000_init_log);
-static void
-e1000_init_log(void)
+RTE_INIT(e1000_init_log)
{
e1000_igb_init_log();
}
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 5f729f27..b955068a 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1639,6 +1639,7 @@ igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER;
return rx_offload_capa;
@@ -1720,8 +1721,10 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
/*
* Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2371,8 +2374,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
bus_addr = rxq->rx_ring_phys_addr;
E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
@@ -2501,10 +2506,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
- rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) {
+ rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
- /* set STRCRC bit in all queues */
+ /* clear STRCRC bit in all queues */
if (hw->mac.type == e1000_i350 ||
hw->mac.type == e1000_i210 ||
hw->mac.type == e1000_i211 ||
@@ -2513,14 +2518,14 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
uint32_t dvmolr = E1000_READ_REG(hw,
E1000_DVMOLR(rxq->reg_idx));
- dvmolr |= E1000_DVMOLR_STRCRC;
+ dvmolr &= ~E1000_DVMOLR_STRCRC;
E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
}
}
} else {
- rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+ rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
- /* clear STRCRC bit in all queues */
+ /* set STRCRC bit in all queues */
if (hw->mac.type == e1000_i350 ||
hw->mac.type == e1000_i210 ||
hw->mac.type == e1000_i211 ||
@@ -2529,7 +2534,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
uint32_t dvmolr = E1000_READ_REG(hw,
E1000_DVMOLR(rxq->reg_idx));
- dvmolr &= ~E1000_DVMOLR_STRCRC;
+ dvmolr |= E1000_DVMOLR_STRCRC;
E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
}
}
diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile
index 43339f3b..ff9ce315 100644
--- a/drivers/net/ena/Makefile
+++ b/drivers/net/ena/Makefile
@@ -58,5 +58,6 @@ CFLAGS += $(INCLUDES)
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_timer
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index 38a05877..4abf1a28 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -37,11 +37,19 @@
/*****************************************************************************/
/* Timeout in micro-sec */
-#define ADMIN_CMD_TIMEOUT_US (1000000)
+#define ADMIN_CMD_TIMEOUT_US (3000000)
-#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
+#ifdef ENA_EXTENDED_STATS
+
+#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
+#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
+#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
+
+#endif /* ENA_EXTENDED_STATS */
+
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
| (ENA_COMMON_SPEC_VERSION_MINOR))
@@ -62,7 +70,9 @@
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
-static int ena_alloc_cnt;
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
+#define ENA_POLL_MS 5
/*****************************************************************************/
/*****************************************************************************/
@@ -86,6 +96,11 @@ struct ena_comp_ctx {
bool occupied;
};
+struct ena_com_stats_ctx {
+ struct ena_admin_aq_get_stats_cmd get_cmd;
+ struct ena_admin_acq_get_stats_resp get_resp;
+};
+
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
struct ena_common_mem_addr *ena_addr,
dma_addr_t addr)
@@ -95,50 +110,49 @@ static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
return ENA_COM_INVAL;
}
- ena_addr->mem_addr_low = (u32)addr;
- ena_addr->mem_addr_high =
- ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 32)) >> 32);
+ ena_addr->mem_addr_low = lower_32_bits(addr);
+ ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
return 0;
}
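Editor's note: lower_32_bits()/upper_32_bits() replace the open-coded GENMASK shift; the driver then truncates the high half to u16, which is safe only because the dma_addr_bits check above rejects wider addresses. A standalone sketch of the split:

    #include <stdio.h>
    #include <stdint.h>

    /* Equivalent of the kernel-style lower_32_bits()/upper_32_bits()
     * helpers now used by ena_com_mem_addr_set().
     */
    static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
    static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
        uint64_t dma = 0x0000001234abcd00ULL;

        printf("low  = 0x%08x\n", lower_32(dma)); /* 0x34abcd00 */
        printf("high = 0x%08x\n", upper_32(dma)); /* 0x00000012 */
        return 0;
    }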
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
- ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
- ADMIN_SQ_SIZE(queue->q_depth),
- queue->sq.entries,
- queue->sq.dma_addr,
- queue->sq.mem_handle);
+ struct ena_com_admin_sq *sq = &queue->sq;
+ u16 size = ADMIN_SQ_SIZE(queue->q_depth);
- if (!queue->sq.entries) {
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
+ sq->mem_handle);
+
+ if (!sq->entries) {
ena_trc_err("memory allocation failed");
return ENA_COM_NO_MEM;
}
- queue->sq.head = 0;
- queue->sq.tail = 0;
- queue->sq.phase = 1;
+ sq->head = 0;
+ sq->tail = 0;
+ sq->phase = 1;
- queue->sq.db_addr = NULL;
+ sq->db_addr = NULL;
return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
- ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
- ADMIN_CQ_SIZE(queue->q_depth),
- queue->cq.entries,
- queue->cq.dma_addr,
- queue->cq.mem_handle);
+ struct ena_com_admin_cq *cq = &queue->cq;
+ u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
+ cq->mem_handle);
- if (!queue->cq.entries) {
+ if (!cq->entries) {
ena_trc_err("memory allocation failed");
return ENA_COM_NO_MEM;
}
- queue->cq.head = 0;
- queue->cq.phase = 1;
+ cq->head = 0;
+ cq->phase = 1;
return 0;
}
@@ -146,44 +160,44 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
struct ena_aenq_handlers *aenq_handlers)
{
+ struct ena_com_aenq *aenq = &dev->aenq;
u32 addr_low, addr_high, aenq_caps;
+ u16 size;
dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
- ENA_MEM_ALLOC_COHERENT(dev->dmadev,
- ADMIN_AENQ_SIZE(dev->aenq.q_depth),
- dev->aenq.entries,
- dev->aenq.dma_addr,
- dev->aenq.mem_handle);
+ size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+ ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
+ aenq->entries,
+ aenq->dma_addr,
+ aenq->mem_handle);
- if (!dev->aenq.entries) {
+ if (!aenq->entries) {
ena_trc_err("memory allocation failed");
return ENA_COM_NO_MEM;
}
- dev->aenq.head = dev->aenq.q_depth;
- dev->aenq.phase = 1;
+ aenq->head = aenq->q_depth;
+ aenq->phase = 1;
- addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(dev->aenq.dma_addr);
- addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(dev->aenq.dma_addr);
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_BASE_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_BASE_HI_OFF);
+ ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
aenq_caps = 0;
aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
- ENA_REG_WRITE32(aenq_caps, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_CAPS_OFF);
-
- if (unlikely(!aenq_handlers))
+ if (unlikely(!aenq_handlers)) {
ena_trc_err("aenq handlers pointer is NULL\n");
+ return ENA_COM_INVAL;
+ }
- dev->aenq.aenq_handlers = aenq_handlers;
+ aenq->aenq_handlers = aenq_handlers;
return 0;
}
@@ -217,12 +231,11 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
return &queue->comp_ctx[command_id];
}
-static struct ena_comp_ctx *
-__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_aq_entry *cmd,
- size_t cmd_size_in_bytes,
- struct ena_admin_acq_entry *comp,
- size_t comp_size_in_bytes)
+static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
{
struct ena_comp_ctx *comp_ctx;
u16 tail_masked, cmd_id;
@@ -234,12 +247,9 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
tail_masked = admin_queue->sq.tail & queue_size_mask;
/* In case of queue FULL */
- cnt = admin_queue->sq.tail - admin_queue->sq.head;
+ cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
- admin_queue->sq.tail,
- admin_queue->sq.head,
- admin_queue->q_depth);
+ ena_trc_dbg("admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(ENA_COM_NO_SPACE);
}
@@ -253,6 +263,8 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
+ if (unlikely(!comp_ctx))
+ return ERR_PTR(ENA_COM_INVAL);
comp_ctx->status = ENA_CMD_SUBMITTED;
comp_ctx->comp_size = (u32)comp_size_in_bytes;
@@ -272,7 +284,8 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
admin_queue->sq.phase = !admin_queue->sq.phase;
- ENA_REG_WRITE32(admin_queue->sq.tail, admin_queue->sq.db_addr);
+ ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
+ admin_queue->sq.db_addr);
return comp_ctx;
}
@@ -298,12 +311,11 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
return 0;
}
-static struct ena_comp_ctx *
-ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_aq_entry *cmd,
- size_t cmd_size_in_bytes,
- struct ena_admin_acq_entry *comp,
- size_t comp_size_in_bytes)
+static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
{
unsigned long flags = 0;
struct ena_comp_ctx *comp_ctx;
@@ -317,7 +329,7 @@ ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
cmd_size_in_bytes,
comp,
comp_size_in_bytes);
- if (unlikely(IS_ERR(comp_ctx)))
+ if (IS_ERR(comp_ctx))
admin_queue->running_state = false;
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -331,9 +343,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size_t size;
int dev_node = 0;
- ENA_TOUCH(ctx);
-
- memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+ memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
@@ -347,23 +357,26 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size,
io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle,
ctx->numa_node,
dev_node);
- if (!io_sq->desc_addr.virt_addr)
+ if (!io_sq->desc_addr.virt_addr) {
ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
size,
io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr,
io_sq->desc_addr.mem_handle);
+ }
} else {
ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
size,
io_sq->desc_addr.virt_addr,
ctx->numa_node,
dev_node);
- if (!io_sq->desc_addr.virt_addr)
+ if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ }
}
if (!io_sq->desc_addr.virt_addr) {
@@ -385,8 +398,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size_t size;
int prev_node = 0;
- ENA_TOUCH(ctx);
- memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
/* Use the basic completion descriptor for Rx */
io_cq->cdesc_entry_size_in_bytes =
@@ -397,17 +409,19 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
- size,
- io_cq->cdesc_addr.virt_addr,
- io_cq->cdesc_addr.phys_addr,
- ctx->numa_node,
- prev_node);
- if (!io_cq->cdesc_addr.virt_addr)
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle,
+ ctx->numa_node,
+ prev_node);
+ if (!io_cq->cdesc_addr.virt_addr) {
ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
size,
io_cq->cdesc_addr.virt_addr,
io_cq->cdesc_addr.phys_addr,
io_cq->cdesc_addr.mem_handle);
+ }
if (!io_cq->cdesc_addr.virt_addr) {
ena_trc_err("memory allocation failed");
@@ -420,9 +434,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
return 0;
}
-static void
-ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_acq_entry *cqe)
+static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_acq_entry *cqe)
{
struct ena_comp_ctx *comp_ctx;
u16 cmd_id;
@@ -447,8 +460,7 @@ ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
-static void
-ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
+static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
struct ena_admin_acq_entry *cqe = NULL;
u16 comp_num = 0;
@@ -499,7 +511,7 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
return ENA_COM_NO_MEM;
case ENA_ADMIN_UNSUPPORTED_OPCODE:
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
case ENA_ADMIN_BAD_OPCODE:
case ENA_ADMIN_MALFORMED_REQUEST:
case ENA_ADMIN_ILLEGAL_PARAMETER:
@@ -510,20 +522,24 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
return 0;
}
-static int
-ena_com_wait_and_process_admin_cq_polling(
- struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
+static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
{
unsigned long flags = 0;
- u64 start_time;
+ unsigned long timeout;
int ret;
- start_time = ENA_GET_SYSTEM_USECS();
+ timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
+
+ while (1) {
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ if (comp_ctx->status != ENA_CMD_SUBMITTED)
+ break;
- while (comp_ctx->status == ENA_CMD_SUBMITTED) {
- if ((ENA_GET_SYSTEM_USECS() - start_time) >
- ADMIN_CMD_TIMEOUT_US) {
+ if (ENA_TIME_EXPIRE(timeout)) {
ena_trc_err("Wait for completion (polling) timeout\n");
/* ENA didn't have any completion */
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
@@ -535,9 +551,7 @@ ena_com_wait_and_process_admin_cq_polling(
goto err;
}
- ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
- ena_com_handle_admin_completion(admin_queue);
- ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ ENA_MSLEEP(ENA_POLL_MS);
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
@@ -549,8 +563,8 @@ ena_com_wait_and_process_admin_cq_polling(
goto err;
}
- ENA_ASSERT(comp_ctx->status == ENA_CMD_COMPLETED,
- "Invalid comp status %d\n", comp_ctx->status);
+ ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
+ "Invalid comp status %d\n", comp_ctx->status);
ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
@@ -558,16 +572,14 @@ err:
return ret;
}
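Editor's note: the reworked polling loop processes completions first, then checks the command status, then tests the deadline, sleeping ENA_POLL_MS between iterations instead of busy-comparing timestamps. A generic sketch of that poll-until-deadline shape; poll_until and fake_done are illustrative, not driver API.

    #include <stdio.h>
    #include <time.h>

    /* Poll a condition with a sleep between iterations (5 ms here,
     * ENA_POLL_MS in the driver) until it holds or the budget expires.
     */
    static int poll_until(int (*done)(void), int timeout_ms, int step_ms)
    {
        struct timespec step = { 0, step_ms * 1000000L };
        int waited = 0;

        while (1) {
            if (done())            /* completion handled elsewhere */
                return 0;
            if (waited >= timeout_ms)
                return -1;         /* timer expired */
            nanosleep(&step, NULL);
            waited += step_ms;
        }
    }

    static int counter;
    static int fake_done(void) { return ++counter >= 3; }

    int main(void)
    {
        printf("result: %d\n", poll_until(fake_done, 100, 5)); /* 0 */
        return 0;
    }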
-static int
-ena_com_wait_and_process_admin_cq_interrupts(
- struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
+static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
{
unsigned long flags = 0;
- int ret = 0;
+ int ret;
ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
- ADMIN_CMD_TIMEOUT_US);
+ admin_queue->completion_timeout);
/* In case the command wasn't completed find out the root cause.
* There might be 2 kinds of errors
@@ -607,16 +619,18 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp;
- u32 mmio_read_reg, ret;
+ u32 mmio_read_reg, ret, i;
unsigned long flags = 0;
- int i;
+ u32 timeout = mmio_read->reg_read_to;
ENA_MIGHT_SLEEP();
+ if (timeout == 0)
+ timeout = ENA_REG_READ_TIMEOUT;
+
/* If readless is disabled, perform regular read */
if (!mmio_read->readless_supported)
- return ENA_REG_READ32((unsigned char *)ena_dev->reg_bar +
- offset);
+ return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
mmio_read->seq_num++;
@@ -632,17 +646,16 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
*/
wmb();
- ENA_REG_WRITE32(mmio_read_reg, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_REG_READ_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
- for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
+ for (i = 0; i < timeout; i++) {
if (read_resp->req_id == mmio_read->seq_num)
break;
ENA_UDELAY(1);
}
- if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
+ if (unlikely(i == timeout)) {
ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
mmio_read->seq_num,
offset,
@@ -653,7 +666,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (read_resp->reg_off != offset) {
- ena_trc_err("reading failed for wrong offset value");
+ ena_trc_err("Read failure: wrong offset provided");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
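
The readless register read above is a small handshake: the host bumps a sequence number, writes a request to a doorbell register, and spins until the device DMA-writes a response whose req_id matches, then validates the echoed offset before trusting the value. A rough single-threaded simulation of that protocol, with fake_device_answer() standing in for the hardware:

#include <stdint.h>
#include <stdio.h>

/* Host-memory response slot the device DMA-writes into (simulated). */
struct read_less_resp {
	uint16_t req_id;
	uint16_t reg_off;
	uint32_t reg_val;
};

/* Hypothetical device model: answers the request immediately. */
static void fake_device_answer(struct read_less_resp *r, uint16_t seq,
			       uint16_t off)
{
	r->reg_val = 0xabcd0000u | off; /* pretend register content */
	r->reg_off = off;
	r->req_id = seq;                /* publishing req_id "releases" it */
}

static uint32_t readless_read(struct read_less_resp *resp, uint16_t *seq_num,
			      uint16_t offset, unsigned int timeout_us)
{
	unsigned int i;

	(*seq_num)++;
	resp->req_id = *seq_num - 1;    /* mark any stale response invalid */
	fake_device_answer(resp, *seq_num, offset); /* the "doorbell write" */

	for (i = 0; i < timeout_us; i++) /* spin until the device responds */
		if (resp->req_id == *seq_num)
			break;

	if (i == timeout_us || resp->reg_off != offset)
		return 0xffffffffu;     /* mirrors ENA_MMIO_READ_TIMEOUT */
	return resp->reg_val;
}

int main(void)
{
	struct read_less_resp resp = { 0 };
	uint16_t seq = 0;
	printf("val=0x%x\n", readless_read(&resp, &seq, 0x58, 100));
	return 0;
}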
@@ -671,9 +684,8 @@ err:
* It is expected that the IRQ called ena_com_handle_admin_completion
* to mark the completions.
*/
-static int
-ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
+static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
{
if (admin_queue->polling)
return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
@@ -692,7 +704,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
u8 direction;
int ret;
- memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
direction = ENA_ADMIN_SQ_DIRECTION_TX;
@@ -706,12 +718,11 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
destroy_cmd.sq.sq_idx = io_sq->idx;
destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&destroy_cmd,
- sizeof(destroy_cmd),
- (struct ena_admin_acq_entry *)&destroy_resp,
- sizeof(destroy_resp));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
ena_trc_err("failed to destroy io sq error: %d\n", ret);
@@ -747,18 +758,20 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
io_sq->desc_addr.phys_addr,
io_sq->desc_addr.mem_handle);
else
- ENA_MEM_FREE(ena_dev->dmadev,
- io_sq->desc_addr.virt_addr);
+ ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
io_sq->desc_addr.virt_addr = NULL;
}
}
-static int wait_for_reset_state(struct ena_com_dev *ena_dev,
- u32 timeout, u16 exp_state)
+static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+ u16 exp_state)
{
u32 val, i;
+ /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
+ timeout = (timeout * 100) / ENA_POLL_MS;
+
for (i = 0; i < timeout; i++) {
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
@@ -771,16 +784,14 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev,
exp_state)
return 0;
- /* The resolution of the timeout is 100ms */
- ENA_MSLEEP(100);
+ ENA_MSLEEP(ENA_POLL_MS);
}
return ENA_COM_TIMER_EXPIRED;
}
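
The device reports the reset timeout in 100 ms units, while the loop now sleeps ENA_POLL_MS per iteration, so the count is rescaled once up front. A small sketch of that conversion; the rounding guard is an addition of this sketch (the driver assumes ENA_POLL_MS divides 100 evenly):

#include <stdio.h>

#define POLL_MS 5 /* assumed poll period, like ENA_POLL_MS */

/* Rescale a timeout given in 100ms units into poll-loop iterations. */
static unsigned int reset_timeout_to_iters(unsigned int timeout_100ms)
{
	return (timeout_100ms * 100 + POLL_MS - 1) / POLL_MS;
}

int main(void)
{
	/* e.g. device reports 3 -> 300ms -> 60 iterations of 5ms each */
	printf("%u iterations\n", reset_timeout_to_iters(3));
	return 0;
}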
-static bool
-ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
- enum ena_admin_aq_feature_id feature_id)
+static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_feature_id feature_id)
{
u32 feature_mask = 1 << feature_id;
@@ -802,14 +813,9 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
struct ena_admin_get_feat_cmd get_cmd;
int ret;
- if (!ena_dev) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
-
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- ena_trc_info("Feature %d isn't supported\n", feature_id);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n", feature_id);
+ return ENA_COM_UNSUPPORTED;
}
memset(&get_cmd, 0x0, sizeof(get_cmd));
@@ -945,10 +951,10 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
sizeof(struct ena_admin_rss_ind_table_entry);
ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
- tbl_size,
- rss->rss_ind_tbl,
- rss->rss_ind_tbl_dma_addr,
- rss->rss_ind_tbl_mem_handle);
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
if (unlikely(!rss->rss_ind_tbl))
goto mem_err1;
@@ -1005,7 +1011,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
u8 direction;
int ret;
- memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
@@ -1041,12 +1047,11 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
}
}
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&create_cmd,
- sizeof(create_cmd),
- (struct ena_admin_acq_entry *)&cmd_completion,
- sizeof(cmd_completion));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
if (unlikely(ret)) {
ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
return ret;
@@ -1133,9 +1138,8 @@ static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
return 0;
}
-static void
-ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
- u16 intr_delay_resolution)
+static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
+ u16 intr_delay_resolution)
{
struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
unsigned int i;
@@ -1165,13 +1169,18 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
size_t comp_size)
{
struct ena_comp_ctx *comp_ctx;
- int ret = 0;
+ int ret;
comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
comp, comp_size);
- if (unlikely(IS_ERR(comp_ctx))) {
- ena_trc_err("Failed to submit command [%ld]\n",
- PTR_ERR(comp_ctx));
+ if (IS_ERR(comp_ctx)) {
+ if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
+ ena_trc_dbg("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+ else
+ ena_trc_err("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+
return PTR_ERR(comp_ctx);
}
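
ena_com_submit_admin_cmd() returns either a valid context pointer or an error code encoded in the pointer itself, which the hunk above now demotes to a debug message for the expected no-device case. A sketch of the ERR_PTR/IS_ERR/PTR_ERR convention it relies on, assuming the usual rule that the top 4095 addresses are never valid objects:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a negative error code in a pointer and decode it again (the
 * same convention as the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers).
 */
static void *err_ptr(long err) { return (void *)err; }
static int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}
static long ptr_err(const void *p) { return (long)p; }

static void *submit(int fail)
{
	static int ctx = 42;
	return fail ? err_ptr(-19 /* e.g. "no device" */) : &ctx;
}

int main(void)
{
	void *p = submit(1);
	if (is_err(p))
		printf("submit failed: %ld\n", ptr_err(p));
	return 0;
}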
@@ -1195,7 +1204,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_create_cq_resp_desc cmd_completion;
int ret;
- memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
@@ -1215,12 +1224,11 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
return ret;
}
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&create_cmd,
- sizeof(create_cmd),
- (struct ena_admin_acq_entry *)&cmd_completion,
- sizeof(cmd_completion));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
if (unlikely(ret)) {
ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
return ret;
@@ -1290,7 +1298,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
- ENA_MSLEEP(20);
+ ENA_MSLEEP(ENA_POLL_MS);
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
}
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -1304,17 +1312,16 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
int ret;
- memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
destroy_cmd.cq_idx = io_cq->idx;
destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&destroy_cmd,
- sizeof(destroy_cmd),
- (struct ena_admin_acq_entry *)&destroy_resp,
- sizeof(destroy_resp));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
@@ -1341,13 +1348,12 @@ void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
u16 depth = ena_dev->aenq.q_depth;
- ENA_ASSERT(ena_dev->aenq.head == depth, "Invalid AENQ state\n");
+ ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
/* Init head_db to mark that all entries in the queue
* are initially available
*/
- ENA_REG_WRITE32(depth, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AENQ_HEAD_DB_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
@@ -1356,12 +1362,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
struct ena_admin_get_feat_resp get_resp;
- int ret = 0;
-
- if (unlikely(!ena_dev)) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
+ int ret;
ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
if (ret) {
@@ -1373,7 +1374,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
get_resp.u.aenq.supported_groups,
groups_flag);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -1476,41 +1477,42 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
+ u16 size;
- if (!admin_queue)
- return;
-
+ ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
if (admin_queue->comp_ctx)
ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
admin_queue->comp_ctx = NULL;
-
- if (admin_queue->sq.entries)
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- ADMIN_SQ_SIZE(admin_queue->q_depth),
- admin_queue->sq.entries,
- admin_queue->sq.dma_addr,
- admin_queue->sq.mem_handle);
- admin_queue->sq.entries = NULL;
-
- if (admin_queue->cq.entries)
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- ADMIN_CQ_SIZE(admin_queue->q_depth),
- admin_queue->cq.entries,
- admin_queue->cq.dma_addr,
- admin_queue->cq.mem_handle);
- admin_queue->cq.entries = NULL;
-
+ size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+ if (sq->entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
+ sq->dma_addr, sq->mem_handle);
+ sq->entries = NULL;
+
+ size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+ if (cq->entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
+ cq->dma_addr, cq->mem_handle);
+ cq->entries = NULL;
+
+ size = ADMIN_AENQ_SIZE(aenq->q_depth);
if (ena_dev->aenq.entries)
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- ADMIN_AENQ_SIZE(ena_dev->aenq.q_depth),
- ena_dev->aenq.entries,
- ena_dev->aenq.dma_addr,
- ena_dev->aenq.mem_handle);
- ena_dev->aenq.entries = NULL;
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
+ aenq->dma_addr, aenq->mem_handle);
+ aenq->entries = NULL;
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
+ u32 mask_value = 0;
+
+ if (polling)
+ mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+ ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
ena_dev->admin_queue.polling = polling;
}
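
Switching the admin queue to polling now also masks the admin interrupt line through the interrupt-mask register, so a stray MSI-X cannot race the polled completion handling. A sketch of that select-mask-then-write shape, with reg_write32() as a stub for ENA_REG_WRITE32() and an assumed mask bit:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define ADMIN_INTR_MASK 0x1u /* assumed bit, like ENA_REGS_ADMIN_INTR_MASK */

/* Stub register write; the real driver goes through ENA_REG_WRITE32(). */
static void reg_write32(uint32_t *reg, uint32_t val) { *reg = val; }

static void set_admin_polling(uint32_t *intr_mask_reg, bool *polling,
			      bool enable)
{
	/* Mask the admin interrupt in polling mode, unmask otherwise. */
	reg_write32(intr_mask_reg, enable ? ADMIN_INTR_MASK : 0);
	*polling = enable;
}

int main(void)
{
	uint32_t reg = 0;
	bool polling = false;
	set_admin_polling(&reg, &polling, true);
	printf("mask=0x%x polling=%d\n", reg, polling);
	return 0;
}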
@@ -1536,8 +1538,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
return 0;
}
-void
-ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@@ -1548,10 +1549,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
- ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_LO_OFF);
- ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
sizeof(*mmio_read->read_resp),
@@ -1570,10 +1569,8 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
@@ -1619,24 +1616,20 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
if (ret)
goto error;
- admin_queue->sq.db_addr = (u32 __iomem *)
- ((unsigned char *)ena_dev->reg_bar + ENA_REGS_AQ_DB_OFF);
+ admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ ENA_REGS_AQ_DB_OFF);
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AQ_BASE_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AQ_BASE_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_ACQ_BASE_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_ACQ_BASE_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
aq_caps = 0;
aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
@@ -1650,10 +1643,8 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
- ENA_REG_WRITE32(aq_caps, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AQ_CAPS_OFF);
- ENA_REG_WRITE32(acq_caps, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_ACQ_CAPS_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
if (ret)
goto error;
@@ -1672,7 +1663,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
{
struct ena_com_io_sq *io_sq;
struct ena_com_io_cq *io_cq;
- int ret = 0;
+ int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
@@ -1683,8 +1674,8 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
io_sq = &ena_dev->io_sq_queues[ctx->qid];
io_cq = &ena_dev->io_cq_queues[ctx->qid];
- memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
- memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));
+ memset(io_sq, 0x0, sizeof(*io_sq));
+ memset(io_cq, 0x0, sizeof(*io_cq));
/* Init CQ */
io_cq->q_depth = ctx->queue_size;
@@ -1794,6 +1785,19 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
sizeof(get_resp.u.offload));
+	/* Driver hints isn't a mandatory admin command, so in case the
+	 * command isn't supported, set the driver hints to 0
+ */
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
+
+ if (!rc)
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+ sizeof(get_resp.u.hw_hints));
+ else if (rc == ENA_COM_UNSUPPORTED)
+ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
+ else
+ return rc;
+
return 0;
}
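
HW hints is treated here as an optional feature: ENA_COM_UNSUPPORTED degrades to zeroed defaults instead of failing the whole attribute query, while any other error still propagates. The same three-way handling in isolation, with a hypothetical get_feature():

#include <string.h>
#include <stdio.h>

#define E_UNSUPPORTED -95 /* stand-in for ENA_COM_UNSUPPORTED */

struct hints { int timeout_ms; };

/* Hypothetical feature query that may report "unsupported". */
static int get_feature(struct hints *out, int supported)
{
	if (!supported)
		return E_UNSUPPORTED;
	out->timeout_ms = 4000;
	return 0;
}

/* Optional feature: success copies it in, "unsupported" degrades to
 * zeroed defaults, any other error is still fatal for the caller.
 */
static int get_optional_hints(struct hints *h, int supported)
{
	int rc = get_feature(h, supported);

	if (rc == 0)
		return 0;
	if (rc == E_UNSUPPORTED) {
		memset(h, 0, sizeof(*h));
		return 0;
	}
	return rc;
}

int main(void)
{
	struct hints h;
	printf("rc=%d\n", get_optional_hints(&h, 0));
	return 0;
}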
@@ -1826,6 +1830,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
struct ena_admin_aenq_common_desc *aenq_common;
struct ena_com_aenq *aenq = &dev->aenq;
ena_aenq_handler handler_cb;
+ unsigned long long timestamp;
u16 masked_head, processed = 0;
u8 phase;
@@ -1837,11 +1842,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* Go over all the events */
while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
phase) {
+ timestamp = (unsigned long long)aenq_common->timestamp_low |
+ ((unsigned long long)aenq_common->timestamp_high << 32);
+ ENA_TOUCH(timestamp); /* In case debug is disabled */
ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
aenq_common->group,
aenq_common->syndrom,
- (unsigned long long)aenq_common->timestamp_low +
- ((u64)aenq_common->timestamp_high << 32));
+ timestamp);
/* Handle specific event*/
handler_cb = ena_com_get_specific_aenq_cb(dev,
@@ -1869,11 +1876,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- ENA_REG_WRITE32((u32)aenq->head, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_HEAD_DB_OFF);
+ ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
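
The AENQ descriptor carries its timestamp as two 32-bit words, and both halves must be widened to 64 bits before the shift; shifting a 32-bit value left by 32 is undefined. The combining step in isolation:

#include <stdint.h>
#include <stdio.h>

/* Combine a split 32-bit pair into one 64-bit value. Casting before the
 * shift matters: (uint32_t)hi << 32 is undefined behavior, so widen first.
 */
static uint64_t combine64(uint32_t low, uint32_t high)
{
	return (uint64_t)low | ((uint64_t)high << 32);
}

int main(void)
{
	printf("%llu\n",
	       (unsigned long long)combine64(0xdeadbeefu, 0x1u));
	return 0;
}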
-int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason)
{
u32 stat, timeout, cap, reset_val;
int rc;
@@ -1901,8 +1908,9 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
/* start reset */
reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
- ENA_REG_WRITE32(reset_val, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_DEV_CTL_OFF);
+ reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+ ENA_REGS_DEV_CTL_RESET_REASON_MASK;
+ ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
/* Write again the MMIO read request address */
ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
@@ -1915,29 +1923,32 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
}
/* reset done */
- ENA_REG_WRITE32(0, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_DEV_CTL_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
ena_trc_err("Reset indication didn't turn off\n");
return rc;
}
+ timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
+ ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ if (timeout)
+		/* the resolution of the timeout reg is 100ms */
+ ena_dev->admin_queue.completion_timeout = timeout * 100000;
+ else
+ ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
return 0;
}
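
The reset path now packs the reason into DEV_CTL with a shift-and-mask and, once the reset completes, decodes the admin command timeout field out of CAPS (100 ms resolution, zero meaning "not reported"). A sketch of both steps; the field positions below are illustrative only, the real shifts and masks come from ena_regs_defs.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout for illustration. */
#define RESET_MASK          0x1u
#define REASON_SHIFT        28
#define REASON_MASK         0xf0000000u
#define CAPS_ADMIN_TO_MASK  0xfe000000u
#define CAPS_ADMIN_TO_SHIFT 25

static uint32_t encode_reset(uint32_t reason)
{
	/* Keep the reset bit, place the reason in its own field. */
	return RESET_MASK | ((reason << REASON_SHIFT) & REASON_MASK);
}

/* CAPS reports the admin timeout in 100ms units; 0 means "not reported",
 * so fall back to a compile-time default (in usecs, like the driver).
 */
static uint32_t admin_timeout_us(uint32_t caps, uint32_t default_us)
{
	uint32_t to = (caps & CAPS_ADMIN_TO_MASK) >> CAPS_ADMIN_TO_SHIFT;

	return to ? to * 100000u : default_us;
}

int main(void)
{
	printf("ctl=0x%x to=%u\n", encode_reset(3),
	       admin_timeout_us(0x06000000u, 3000000u));
	return 0;
}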
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_aq_get_stats_cmd *get_cmd,
- struct ena_admin_acq_get_stats_resp *get_resp,
+ struct ena_com_stats_ctx *ctx,
enum ena_admin_get_stats_type type)
{
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
struct ena_com_admin_queue *admin_queue;
- int ret = 0;
-
- if (!ena_dev) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
+ int ret;
admin_queue = &ena_dev->admin_queue;
@@ -1945,12 +1956,11 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
get_cmd->aq_common_descriptor.flags = 0;
get_cmd->type = type;
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)get_cmd,
- sizeof(*get_cmd),
- (struct ena_admin_acq_entry *)get_resp,
- sizeof(*get_resp));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
if (unlikely(ret))
ena_trc_err("Failed to get stats. error: %d\n", ret);
@@ -1961,78 +1971,28 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
struct ena_admin_basic_stats *stats)
{
- int ret = 0;
- struct ena_admin_aq_get_stats_cmd get_cmd;
- struct ena_admin_acq_get_stats_resp get_resp;
+ struct ena_com_stats_ctx ctx;
+ int ret;
- memset(&get_cmd, 0x0, sizeof(get_cmd));
- ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
- ENA_ADMIN_GET_STATS_TYPE_BASIC);
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
if (likely(ret == 0))
- memcpy(stats, &get_resp.basic_stats,
- sizeof(get_resp.basic_stats));
+ memcpy(stats, &ctx.get_resp.basic_stats,
+ sizeof(ctx.get_resp.basic_stats));
return ret;
}
-int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
- u32 len)
-{
- int ret = 0;
- struct ena_admin_aq_get_stats_cmd get_cmd;
- struct ena_admin_acq_get_stats_resp get_resp;
- ena_mem_handle_t mem_handle = 0;
- void *virt_addr;
- dma_addr_t phys_addr;
-
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
- virt_addr, phys_addr, mem_handle);
- if (!virt_addr) {
- ret = ENA_COM_NO_MEM;
- goto done;
- }
- memset(&get_cmd, 0x0, sizeof(get_cmd));
- ret = ena_com_mem_addr_set(ena_dev,
- &get_cmd.u.control_buffer.address,
- phys_addr);
- if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
- return ret;
- }
- get_cmd.u.control_buffer.length = len;
-
- get_cmd.device_id = ena_dev->stats_func;
- get_cmd.queue_idx = ena_dev->stats_queue;
-
- ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
- ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
- if (ret < 0)
- goto free_ext_stats_mem;
-
- ret = snprintf(buff, len, "%s", (char *)virt_addr);
-
-free_ext_stats_mem:
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
- mem_handle);
-done:
- return ret;
-}
-
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
struct ena_com_admin_queue *admin_queue;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
- int ret = 0;
-
- if (unlikely(!ena_dev)) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
+ int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- ena_trc_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2049,11 +2009,10 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
- if (unlikely(ret)) {
+ if (unlikely(ret))
ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
- return ENA_COM_INVAL;
- }
- return 0;
+
+ return ret;
}
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
@@ -2066,7 +2025,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
if (unlikely(ret)) {
ena_trc_err("Failed to get offload capabilities %d\n", ret);
- return ENA_COM_INVAL;
+ return ret;
}
memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
@@ -2085,9 +2044,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_FUNCTION)) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_FUNCTION);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ return ENA_COM_UNSUPPORTED;
}
/* Validate hash function is supported */
@@ -2099,7 +2058,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
ena_trc_err("Func hash %d isn't supported by device, abort\n",
rss->hash_func);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2158,7 +2117,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
ena_trc_err("Flow hash function %d isn't supported\n", func);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
switch (func) {
@@ -2207,7 +2166,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (unlikely(rc))
return rc;
- rss->hash_func = (enum ena_admin_hash_functions)get_resp.u.flow_hash_func.selected_func;
+ rss->hash_func = get_resp.u.flow_hash_func.selected_func;
if (func)
*func = rss->hash_func;
@@ -2242,17 +2201,20 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
int ret;
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_INPUT)) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_INPUT);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_INPUT);
+ return ENA_COM_UNSUPPORTED;
}
+ memset(&cmd, 0x0, sizeof(cmd));
+
cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
cmd.aq_common_descriptor.flags =
ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
@@ -2268,20 +2230,17 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
ena_trc_err("memory address set failed\n");
return ret;
}
- cmd.control_buffer.length =
- sizeof(struct ena_admin_feature_rss_hash_control);
+ cmd.control_buffer.length = sizeof(*hash_ctrl);
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
- if (unlikely(ret)) {
+ if (unlikely(ret))
ena_trc_err("Failed to set hash input. error: %d\n", ret);
- return ENA_COM_INVAL;
- }
- return 0;
+ return ret;
}
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
@@ -2293,7 +2252,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
int rc, i;
/* Get the supported hash input */
- rc = ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+ rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
if (unlikely(rc))
return rc;
@@ -2322,7 +2281,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
@@ -2332,7 +2291,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
}
@@ -2340,7 +2299,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
/* In case of failure, restore the old hash ctrl */
if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
return rc;
}
@@ -2377,7 +2336,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
/* In case of failure, restore the old hash ctrl */
if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
return 0;
}
@@ -2404,14 +2363,13 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
- int ret = 0;
+ int ret;
- if (!ena_com_check_supported_feature_id(
- ena_dev,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
- return ENA_COM_PERMISSION;
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ return ENA_COM_UNSUPPORTED;
}
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
@@ -2446,12 +2404,10 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
- if (unlikely(ret)) {
+ if (unlikely(ret))
ena_trc_err("Failed to set indirect table. error: %d\n", ret);
- return ENA_COM_INVAL;
- }
- return 0;
+ return ret;
}
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
@@ -2538,17 +2494,18 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
}
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
- u32 debug_area_size) {
+ u32 debug_area_size)
+{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
- debug_area_size,
- host_attr->debug_area_virt_addr,
- host_attr->debug_area_dma_addr,
- host_attr->debug_area_dma_handle);
- if (unlikely(!host_attr->debug_area_virt_addr)) {
- host_attr->debug_area_size = 0;
- return ENA_COM_NO_MEM;
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ debug_area_size,
+ host_attr->debug_area_virt_addr,
+ host_attr->debug_area_dma_addr,
+ host_attr->debug_area_dma_handle);
+ if (unlikely(!host_attr->debug_area_virt_addr)) {
+ host_attr->debug_area_size = 0;
+ return ENA_COM_NO_MEM;
}
host_attr->debug_area_size = debug_area_size;
@@ -2590,6 +2547,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
struct ena_com_admin_queue *admin_queue;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
+
int ret;
/* Host attribute config is called before ena_com_get_dev_attr_feat
@@ -2635,14 +2593,12 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
- return ena_com_check_supported_feature_id(
- ena_dev,
- ENA_ADMIN_INTERRUPT_MODERATION);
+ return ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_INTERRUPT_MODERATION);
}
-int
-ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
- u32 tx_coalesce_usecs)
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs)
{
if (!ena_dev->intr_delay_resolution) {
ena_trc_err("Illegal interrupt delay granularity value\n");
@@ -2655,9 +2611,8 @@ ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
return 0;
}
-int
-ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
- u32 rx_coalesce_usecs)
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs)
{
if (!ena_dev->intr_delay_resolution) {
ena_trc_err("Illegal interrupt delay granularity value\n");
@@ -2690,9 +2645,9 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
ENA_ADMIN_INTERRUPT_MODERATION);
if (rc) {
- if (rc == ENA_COM_PERMISSION) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_INTERRUPT_MODERATION);
+ if (rc == ENA_COM_UNSUPPORTED) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
@@ -2719,8 +2674,7 @@ err:
return rc;
}
-void
-ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
@@ -2763,14 +2717,12 @@ ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
ENA_INTR_HIGHEST_BYTES;
}
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
return ena_dev->intr_moder_tx_interval;
}
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
@@ -2794,7 +2746,10 @@ void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
intr_moder_tbl[level].intr_moder_interval /=
ena_dev->intr_delay_resolution;
intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
- intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
+
+ /* use hardcoded value until ethtool supports bytecount parameter */
+ if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
+ intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}
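
ENA_INTR_BYTE_COUNT_NOT_SUPPORTED (0xFFFFFF) acts as a sentinel: callers that cannot express a byte budget (the ethtool path mentioned above) pass it, and the existing table value is kept. The check in isolation:

#include <stdint.h>
#include <stdio.h>

#define BYTES_NOT_SUPPORTED 0xFFFFFFu /* sentinel, as in ena_com.h */

struct moder_entry { uint32_t pkts; uint32_t bytes; };

/* Update an entry, but treat the sentinel as "keep the current bytes". */
static void update_entry(struct moder_entry *e, uint32_t pkts, uint32_t bytes)
{
	e->pkts = pkts;
	if (bytes != BYTES_NOT_SUPPORTED)
		e->bytes = bytes;
}

int main(void)
{
	struct moder_entry e = { 64, 2048 };

	update_entry(&e, 128, BYTES_NOT_SUPPORTED);
	printf("pkts=%u bytes=%u\n", e.pkts, e.bytes); /* bytes stays 2048 */
	return 0;
}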
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
diff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h
index e5345926..f58cd86a 100644
--- a/drivers/net/ena/base/ena_com.h
+++ b/drivers/net/ena/base/ena_com.h
@@ -35,15 +35,7 @@
#define ENA_COM
#include "ena_plat.h"
-#include "ena_common_defs.h"
-#include "ena_admin_defs.h"
-#include "ena_eth_io_defs.h"
-#include "ena_regs_defs.h"
-#if defined(__linux__) && !defined(__KERNEL__)
-#include <rte_lcore.h>
-#include <rte_spinlock.h>
-#define __iomem
-#endif
+#include "ena_includes.h"
#define ENA_MAX_NUM_IO_QUEUES 128U
/* We need two queues for each IO (one for Tx and one for Rx) */
@@ -89,6 +81,11 @@
#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6
#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4
+#define ENA_INTR_MODER_LEVEL_STRIDE 1
+#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF
+
+#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+
enum ena_intr_moder_level {
ENA_INTR_MODER_LOWEST = 0,
ENA_INTR_MODER_LOW,
@@ -120,8 +117,8 @@ struct ena_com_rx_buf_info {
};
struct ena_com_io_desc_addr {
- u8 __iomem *pbuf_dev_addr; /* LLQ address */
- u8 *virt_addr;
+ u8 __iomem *pbuf_dev_addr; /* LLQ address */
+ u8 *virt_addr;
dma_addr_t phys_addr;
ena_mem_handle_t mem_handle;
};
@@ -130,13 +127,12 @@ struct ena_com_tx_meta {
u16 mss;
u16 l3_hdr_len;
u16 l3_hdr_offset;
- u16 l3_outer_hdr_len; /* In words */
- u16 l3_outer_hdr_offset;
u16 l4_hdr_len; /* In words */
};
struct ena_com_io_cq {
struct ena_com_io_desc_addr cdesc_addr;
+ void *bus;
/* Interrupt unmask register */
u32 __iomem *unmask_reg;
@@ -174,6 +170,7 @@ struct ena_com_io_cq {
struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
+ void *bus;
u32 __iomem *db_addr;
u8 __iomem *header_addr;
@@ -228,8 +225,11 @@ struct ena_com_stats_admin {
struct ena_com_admin_queue {
void *q_dmadev;
+ void *bus;
ena_spinlock_t q_lock; /* spinlock for the admin queue */
+
struct ena_comp_ctx *comp_ctx;
+ u32 completion_timeout;
u16 q_depth;
struct ena_com_admin_cq cq;
struct ena_com_admin_sq sq;
@@ -266,6 +266,7 @@ struct ena_com_mmio_read {
struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
dma_addr_t read_resp_dma_addr;
ena_mem_handle_t read_resp_mem_handle;
+ u32 reg_read_to; /* in us */
u16 seq_num;
bool readless_supported;
/* spin lock to ensure a single outstanding read */
@@ -316,6 +317,7 @@ struct ena_com_dev {
u8 __iomem *reg_bar;
void __iomem *mem_bar;
void *dmadev;
+ void *bus;
enum ena_admin_placement_policy_type tx_mem_queue_type;
u32 tx_max_header_size;
@@ -340,6 +342,7 @@ struct ena_com_dev_get_features_ctx {
struct ena_admin_device_attr_feature_desc dev_attr;
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload;
+ struct ena_admin_ena_hw_hints hw_hints;
};
struct ena_com_create_io_ctx {
@@ -379,7 +382,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
* @ena_dev: ENA communication layer struct
- * @realess_supported: readless mode (enable/disable)
+ * @readless_supported: readless mode (enable/disable)
*/
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
bool readless_supported);
@@ -421,14 +424,16 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
/* ena_com_dev_reset - Perform an FLR on the device.
* @ena_dev: ENA communication layer struct
+ * @reset_reason: Specify the trigger for the reset in case of an error.
*
* @return - 0 on success, negative value on failure.
*/
-int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason);
/* ena_com_create_io_queue - Create io queue.
* @ena_dev: ENA communication layer struct
- * ena_com_create_io_ctx - create context structure
+ * @ctx - create context structure
*
* Create the submission and the completion queues.
*
@@ -437,8 +442,9 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev);
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
struct ena_com_create_io_ctx *ctx);
-/* ena_com_admin_destroy - Destroy IO queue with the queue id - qid.
+/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
* @ena_dev: ENA communication layer struct
+ * @qid - the caller's virtual queue id.
*/
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
@@ -581,9 +587,8 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
*
* @return: 0 on Success and negative value otherwise.
*/
-int
-ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx);
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx);
/* ena_com_get_dev_basic_stats - Get device basic statistics
* @ena_dev: ENA communication layer struct
@@ -608,9 +613,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
*
* @return: 0 on Success and negative value otherwise.
*/
-int
-ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload);
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload);
/* ena_com_rss_init - Init RSS
* @ena_dev: ENA communication layer struct
@@ -765,8 +769,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
*
* Retrieve the RSS indirection table from the device.
*
- * @note: If the caller called ena_com_indirect_table_fill_entry but didn't
- * flash it to the device, the new configuration will be lost.
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
+ * it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
*/
@@ -874,8 +878,7 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
* moderation table back to the default parameters.
* @ena_dev: ENA communication layer struct
*/
-void
-ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
* non-adaptive interval in Tx direction.
@@ -884,9 +887,8 @@ ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
*
* @return - 0 on success, negative value on failure.
*/
-int
-ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
- u32 tx_coalesce_usecs);
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs);
/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
* non-adaptive interval in Rx direction.
@@ -895,9 +897,8 @@ ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
*
* @return - 0 on success, negative value on failure.
*/
-int
-ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
- u32 rx_coalesce_usecs);
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs);
/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
* non-adaptive interval in Tx direction.
@@ -905,8 +906,7 @@ ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
*
* @return - interval in usec
*/
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
* non-adaptive interval in Rx direction.
@@ -914,8 +914,7 @@ ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
*
* @return - interval in usec
*/
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
* moderation table.
@@ -940,20 +939,17 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
enum ena_intr_moder_level level,
struct ena_intr_moder_entry *entry);
-static inline bool
-ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
+static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
}
-static inline void
-ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
+static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
ena_dev->adaptive_coalescing = true;
}
-static inline void
-ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
+static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
ena_dev->adaptive_coalescing = false;
}
@@ -966,12 +962,11 @@ ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
* @moder_tbl_idx: Current table level as input update new level as return
* value.
*/
-static inline void
-ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
- unsigned int pkts,
- unsigned int bytes,
- unsigned int *smoothed_interval,
- unsigned int *moder_tbl_idx)
+static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
+ unsigned int pkts,
+ unsigned int bytes,
+ unsigned int *smoothed_interval,
+ unsigned int *moder_tbl_idx)
{
enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
struct ena_intr_moder_entry *curr_moder_entry;
@@ -1001,17 +996,20 @@ ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
if ((pkts > curr_moder_entry->pkts_per_interval) ||
(bytes > curr_moder_entry->bytes_per_interval))
- new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1);
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
} else {
- pred_moder_entry = &intr_moder_tbl[curr_moder_idx - 1];
+ pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];
if ((pkts <= pred_moder_entry->pkts_per_interval) ||
(bytes <= pred_moder_entry->bytes_per_interval))
- new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx - 1);
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
else if ((pkts > curr_moder_entry->pkts_per_interval) ||
(bytes > curr_moder_entry->bytes_per_interval)) {
if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
- new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1);
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
}
}
new_moder_entry = &intr_moder_tbl[new_moder_idx];
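
The inline helper walks the moderation table one level per call: up when traffic exceeds the current row, down when it drops to or below the previous row, with ENA_INTR_MODER_LEVEL_STRIDE now naming the step size. A simplified sketch of just the level-selection logic (the interval smoothing is omitted):

#include <stdio.h>

enum level { LOWEST, LOW, MID, HIGH, HIGHEST, LEVELS };

struct row { unsigned int pkts; unsigned int bytes; };

/* Compare against the current and previous rows and move at most one
 * level per call, as the driver's helper does.
 */
static enum level next_level(const struct row *tbl, enum level cur,
			     unsigned int pkts, unsigned int bytes)
{
	if (cur == LOWEST)
		return (pkts > tbl[cur].pkts || bytes > tbl[cur].bytes)
			? cur + 1 : cur;
	if (pkts <= tbl[cur - 1].pkts || bytes <= tbl[cur - 1].bytes)
		return cur - 1;
	if ((pkts > tbl[cur].pkts || bytes > tbl[cur].bytes) &&
	    cur != HIGHEST)
		return cur + 1;
	return cur;
}

int main(void)
{
	struct row tbl[LEVELS] = {
		{32, 1024}, {64, 2048}, {128, 4096}, {256, 8192}, {512, 16384}
	};

	printf("level=%d\n", next_level(tbl, MID, 200, 5000)); /* -> HIGH */
	return 0;
}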
@@ -1044,18 +1042,12 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
intr_reg->intr_control |=
(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
- & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+ & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
if (unmask)
intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
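
The one-line change above fixes a copy/paste bug: the Tx delay was shifted into the Tx field but then masked with the Rx field's mask, silently clearing it to zero. A tiny demonstration with an assumed register layout (rx delay in bits 14:0, tx in 29:15):

#include <stdint.h>
#include <stdio.h>

#define RX_DELAY_MASK  0x00007fffu
#define TX_DELAY_SHIFT 15
#define TX_DELAY_MASK  0x3fff8000u

int main(void)
{
	uint32_t tx_delay = 5;

	/* Buggy: shifted tx value & rx mask == 0, the delay is lost. */
	uint32_t buggy = (tx_delay << TX_DELAY_SHIFT) & RX_DELAY_MASK;
	/* Fixed: mask matches the field the value was shifted into. */
	uint32_t fixed = (tx_delay << TX_DELAY_SHIFT) & TX_DELAY_MASK;

	printf("buggy=0x%x fixed=0x%x\n", buggy, fixed);
	return 0;
}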
-int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
- u32 len);
-
-int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
- u32 funct_queue);
-
#if defined(__cplusplus)
}
#endif /* __cplusplus */
diff --git a/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/drivers/net/ena/base/ena_defs/ena_admin_defs.h
index 7a031d90..04d4e9a5 100644
--- a/drivers/net/ena/base/ena_defs/ena_admin_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_admin_defs.h
@@ -34,174 +34,140 @@
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_
-/* admin commands opcodes */
enum ena_admin_aq_opcode {
- /* create submission queue */
- ENA_ADMIN_CREATE_SQ = 1,
+ ENA_ADMIN_CREATE_SQ = 1,
- /* destroy submission queue */
- ENA_ADMIN_DESTROY_SQ = 2,
+ ENA_ADMIN_DESTROY_SQ = 2,
- /* create completion queue */
- ENA_ADMIN_CREATE_CQ = 3,
+ ENA_ADMIN_CREATE_CQ = 3,
- /* destroy completion queue */
- ENA_ADMIN_DESTROY_CQ = 4,
+ ENA_ADMIN_DESTROY_CQ = 4,
- /* get capabilities of particular feature */
- ENA_ADMIN_GET_FEATURE = 8,
+ ENA_ADMIN_GET_FEATURE = 8,
- /* get capabilities of particular feature */
- ENA_ADMIN_SET_FEATURE = 9,
+ ENA_ADMIN_SET_FEATURE = 9,
- /* get statistics */
- ENA_ADMIN_GET_STATS = 11,
+ ENA_ADMIN_GET_STATS = 11,
};
-/* admin command completion status codes */
enum ena_admin_aq_completion_status {
- /* Request completed successfully */
- ENA_ADMIN_SUCCESS = 0,
+ ENA_ADMIN_SUCCESS = 0,
- /* no resources to satisfy request */
- ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
- /* Bad opcode in request descriptor */
- ENA_ADMIN_BAD_OPCODE = 2,
+ ENA_ADMIN_BAD_OPCODE = 2,
- /* Unsupported opcode in request descriptor */
- ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
- /* Wrong request format */
- ENA_ADMIN_MALFORMED_REQUEST = 4,
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
- /* One of parameters is not valid. Provided in ACQ entry
- * extended_status
- */
- ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+ /* Additional status is provided in ACQ entry extended_status */
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
- /* unexpected error */
- ENA_ADMIN_UNKNOWN_ERROR = 6,
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
};
-/* get/set feature subcommands opcodes */
enum ena_admin_aq_feature_id {
- /* list of all supported attributes/capabilities in the ENA */
- ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
- /* max number of supported queues per for every queues type */
- ENA_ADMIN_MAX_QUEUES_NUM = 2,
+ ENA_ADMIN_HW_HINTS = 3,
- /* Receive Side Scaling (RSS) function */
- ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
- /* stateless TCP/UDP/IP offload capabilities. */
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
- /* Multiple tuples flow table configuration */
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
- /* max MTU, current MTU */
- ENA_ADMIN_MTU = 14,
+ ENA_ADMIN_MTU = 14,
- /* Receive Side Scaling (RSS) hash input */
- ENA_ADMIN_RSS_HASH_INPUT = 18,
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
- /* interrupt moderation parameters */
- ENA_ADMIN_INTERRUPT_MODERATION = 20,
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
- /* AENQ configuration */
- ENA_ADMIN_AENQ_CONFIG = 26,
+ ENA_ADMIN_AENQ_CONFIG = 26,
- /* Link configuration */
- ENA_ADMIN_LINK_CONFIG = 27,
+ ENA_ADMIN_LINK_CONFIG = 27,
- /* Host attributes configuration */
- ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
- /* Number of valid opcodes */
- ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
-/* descriptors and headers placement */
enum ena_admin_placement_policy_type {
- /* descriptors and headers are in OS memory */
- ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+ /* descriptors and headers are in host memory */
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
- /* descriptors and headers in device memory (a.k.a Low Latency
+ /* descriptors and headers are in device memory (a.k.a Low Latency
* Queue)
*/
- ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
};
-/* link speeds */
enum ena_admin_link_types {
- ENA_ADMIN_LINK_SPEED_1G = 0x1,
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
- ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
- ENA_ADMIN_LINK_SPEED_5G = 0x4,
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
- ENA_ADMIN_LINK_SPEED_10G = 0x8,
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
- ENA_ADMIN_LINK_SPEED_25G = 0x10,
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
- ENA_ADMIN_LINK_SPEED_40G = 0x20,
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
- ENA_ADMIN_LINK_SPEED_50G = 0x40,
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
- ENA_ADMIN_LINK_SPEED_100G = 0x80,
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
- ENA_ADMIN_LINK_SPEED_200G = 0x100,
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
- ENA_ADMIN_LINK_SPEED_400G = 0x200,
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
};
-/* completion queue update policy */
enum ena_admin_completion_policy_type {
- /* cqe for each sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
+ /* completion queue entry for each sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
- /* cqe upon request in sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
+ /* completion queue entry upon request in sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
/* current queue head pointer is updated in OS memory upon sq
* descriptor request
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
/* current queue head pointer is updated in OS memory for each sq
* descriptor
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
};
-/* type of get statistics command */
+/* Basic stats return ena_admin_basic_stats while extended stats return a
+ * buffer (string format) with additional statistics per queue and per
+ * device id
+ */
enum ena_admin_get_stats_type {
- /* Basic statistics */
- ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
- /* Extended statistics */
- ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
};
-/* scope of get statistics command */
enum ena_admin_get_stats_scope {
- ENA_ADMIN_SPECIFIC_QUEUE = 0,
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
- ENA_ADMIN_ETH_TRAFFIC = 1,
+ ENA_ADMIN_ETH_TRAFFIC = 1,
};
-/* ENA Admin Queue (AQ) common descriptor */
struct ena_admin_aq_common_desc {
- /* word 0 : */
- /* command identificator to associate it with the completion
- * 11:0 : command_id
+ /* 11:0 : command_id
* 15:12 : reserved12
*/
uint16_t command_id;
- /* as appears in ena_aq_opcode */
+ /* as appears in ena_admin_aq_opcode */
uint8_t opcode;
/* 0 : phase
@@ -214,24 +180,17 @@ struct ena_admin_aq_common_desc {
uint8_t flags;
};
-/* used in ena_aq_entry. Can point directly to control data, or to a page
- * list chunk. Used also at the end of indirect mode page list chunks, for
- * chaining.
+/* used in ena_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
*/
struct ena_admin_ctrl_buff_info {
- /* word 0 : indicates length of the buffer pointed by
- * control_buffer_address.
- */
uint32_t length;
- /* words 1:2 : points to control buffer (direct or indirect) */
struct ena_common_mem_addr address;
};
-/* submission queue full identification */
struct ena_admin_sq {
- /* word 0 : */
- /* queue id */
uint16_t sq_idx;
/* 4:0 : reserved
@@ -242,36 +201,25 @@ struct ena_admin_sq {
uint8_t reserved1;
};
-/* AQ entry format */
struct ena_admin_aq_entry {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : */
union {
- /* command specific inline data */
uint32_t inline_data_w1[3];
- /* words 1:3 : points to control buffer (direct or
- * indirect, chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
} u;
- /* command specific inline data */
uint32_t inline_data_w4[12];
};
-/* ENA Admin Completion Queue (ACQ) common descriptor */
struct ena_admin_acq_common_desc {
- /* word 0 : */
/* command identifier to associate it with the aq descriptor
* 11:0 : command_id
* 15:12 : reserved12
*/
uint16_t command;
- /* status of request execution */
uint8_t status;
/* 0 : phase
@@ -279,33 +227,21 @@ struct ena_admin_acq_common_desc {
*/
uint8_t flags;
- /* word 1 : */
- /* provides additional info */
uint16_t extended_status;
- /* submission queue head index, serves as a hint what AQ entries can
- * be revoked
- */
+	/* serves as a hint of which AQ entries can be revoked */
uint16_t sq_head_indx;
};
-/* ACQ entry format */
struct ena_admin_acq_entry {
- /* words 0:1 : */
struct ena_admin_acq_common_desc acq_common_descriptor;
- /* response type specific data */
uint32_t response_specific_data[14];
};
-/* ENA AQ Create Submission Queue command. Placed in control buffer pointed
- * by AQ entry
- */
struct ena_admin_aq_create_sq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* word 1 : */
/* 4:0 : reserved0_w1
* 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
*/
@@ -337,7 +273,6 @@ struct ena_admin_aq_create_sq_cmd {
*/
uint8_t sq_caps_3;
- /* word 2 : */
/* associated completion queue id. This CQ must be created prior to
* SQ creation
*/
@@ -346,85 +281,62 @@ struct ena_admin_aq_create_sq_cmd {
/* submission queue depth in entries */
uint16_t sq_depth;
- /* words 3:4 : SQ physical base address in OS memory. This field
- * should not be used for Low Latency queues. Has to be page
- * aligned.
+ /* SQ physical base address in OS memory. This field should not be
+ * used for Low Latency queues. Has to be page aligned.
*/
struct ena_common_mem_addr sq_ba;
- /* words 5:6 : specifies queue head writeback location in OS
- * memory. Valid if completion_policy is set to
- * completion_policy_head_on_demand or completion_policy_head. Has
- * to be cache aligned
+ /* specifies queue head writeback location in OS memory. Valid if
+ * completion_policy is set to completion_policy_head_on_demand or
+ * completion_policy_head. Has to be cache aligned
*/
struct ena_common_mem_addr sq_head_writeback;
- /* word 7 : reserved word */
uint32_t reserved0_w7;
- /* word 8 : reserved word */
uint32_t reserved0_w8;
};
-/* submission queue direction */
enum ena_admin_sq_direction {
- ENA_ADMIN_SQ_DIRECTION_TX = 1,
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
- ENA_ADMIN_SQ_DIRECTION_RX = 2,
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
};
-/* ENA Response for Create SQ Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_create_sq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
- /* word 2 : */
- /* sq identifier */
uint16_t sq_idx;
uint16_t reserved;
- /* word 3 : queue doorbell address as an offset to PCIe MMIO REG BAR */
+ /* queue doorbell address as an offset to PCIe MMIO REG BAR */
uint32_t sq_doorbell_offset;
- /* word 4 : low latency queue ring base address as an offset to
- * PCIe MMIO LLQ_MEM BAR
+ /* low latency queue ring base address as an offset to PCIe MMIO
+ * LLQ_MEM BAR
*/
uint32_t llq_descriptors_offset;
- /* word 5 : low latency queue headers' memory as an offset to PCIe
- * MMIO LLQ_MEM BAR
+ /* low latency queue headers' memory as an offset to PCIe MMIO
+ * LLQ_MEM BAR
*/
uint32_t llq_headers_offset;
};
-/* ENA AQ Destroy Submission Queue command. Placed in control buffer
- * pointed by AQ entry
- */
struct ena_admin_aq_destroy_sq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1 : */
struct ena_admin_sq sq;
};
-/* ENA Response for Destroy SQ Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_destroy_sq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
};
-/* ENA AQ Create Completion Queue command */
struct ena_admin_aq_create_cq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* word 1 : */
/* 4:0 : reserved5
* 5 : interrupt_mode_enabled - if set, cq operates
* in interrupt mode, otherwise - polling
@@ -441,62 +353,39 @@ struct ena_admin_aq_create_cq_cmd {
/* completion queue depth in # of entries. must be power of 2 */
uint16_t cq_depth;
- /* word 2 : msix vector assigned to this cq */
+ /* msix vector assigned to this cq */
uint32_t msix_vector;
- /* words 3:4 : cq physical base address in OS memory. CQ must be
- * physically contiguous
+ /* cq physical base address in OS memory. CQ must be physically
+ * contiguous
*/
struct ena_common_mem_addr cq_ba;
};
-/* ENA Response for Create CQ Command. Appears in ACQ entry as response
- * specific data
- */
struct ena_admin_acq_create_cq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
- /* word 2 : */
- /* cq identifier */
uint16_t cq_idx;
- /* actual cq depth in # of entries */
+ /* actual cq depth in number of entries */
uint16_t cq_actual_depth;
- /* word 3 : cpu numa node address as an offset to PCIe MMIO REG BAR */
uint32_t numa_node_register_offset;
- /* word 4 : completion head doorbell address as an offset to PCIe
- * MMIO REG BAR
- */
uint32_t cq_head_db_register_offset;
- /* word 5 : interrupt unmask register address as an offset into
- * PCIe MMIO REG BAR
- */
uint32_t cq_interrupt_unmask_register_offset;
};
-/* ENA AQ Destroy Completion Queue command. Placed in control buffer
- * pointed by AQ entry
- */
struct ena_admin_aq_destroy_cq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* word 1 : */
- /* associated queue id. */
uint16_t cq_idx;
uint16_t reserved1;
};
-/* ENA Response for Destroy CQ Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_destroy_cq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
};
@@ -504,21 +393,15 @@ struct ena_admin_acq_destroy_cq_resp_desc {
* buffer pointed by AQ entry
*/
struct ena_admin_aq_get_stats_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : */
union {
/* command specific inline data */
uint32_t inline_data_w1[3];
- /* words 1:3 : points to control buffer (direct or
- * indirect, chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
} u;
- /* word 4 : */
/* stats type as defined in enum ena_admin_get_stats_type */
uint8_t type;
@@ -527,7 +410,6 @@ struct ena_admin_aq_get_stats_cmd {
uint16_t reserved3;
- /* word 5 : */
/* queue id. used when scope is specific_queue */
uint16_t queue_idx;
@@ -539,89 +421,60 @@ struct ena_admin_aq_get_stats_cmd {
/* Basic Statistics Command. */
struct ena_admin_basic_stats {
- /* word 0 : */
uint32_t tx_bytes_low;
- /* word 1 : */
uint32_t tx_bytes_high;
- /* word 2 : */
uint32_t tx_pkts_low;
- /* word 3 : */
uint32_t tx_pkts_high;
- /* word 4 : */
uint32_t rx_bytes_low;
- /* word 5 : */
uint32_t rx_bytes_high;
- /* word 6 : */
uint32_t rx_pkts_low;
- /* word 7 : */
uint32_t rx_pkts_high;
- /* word 8 : */
uint32_t rx_drops_low;
- /* word 9 : */
uint32_t rx_drops_high;
};
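
The counters are reported as split 32-bit halves, so a consumer widens them before use. A minimal sketch (the helper name is illustrative, not part of this header):

/* Merge a split 32-bit counter pair into one 64-bit value. */
static uint64_t
ena_stat64(uint32_t low, uint32_t high)
{
	return ((uint64_t)high << 32) | low;
}

/* e.g.: rx_drops = ena_stat64(p->rx_drops_low, p->rx_drops_high); */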
-/* ENA Response for Get Statistics Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_get_stats_resp {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
- /* words 2:11 : */
struct ena_admin_basic_stats basic_stats;
};
-/* ENA Get/Set Feature common descriptor. Appears as inline word in
- * ena_aq_entry
- */
struct ena_admin_get_set_feature_common_desc {
- /* word 0 : */
/* 1:0 : select - 0x1 - current value; 0x3 - default
* value
* 7:3 : reserved3
*/
uint8_t flags;
- /* as appears in ena_feature_id */
+ /* as appears in ena_admin_aq_feature_id */
uint8_t feature_id;
- /* reserved16 */
uint16_t reserved16;
};
-/* ENA Device Attributes Feature descriptor. */
struct ena_admin_device_attr_feature_desc {
- /* word 0 : implementation id */
uint32_t impl_id;
- /* word 1 : device version */
uint32_t device_version;
- /* word 2 : bit map of which bits are supported value of 1
- * indicated that this feature is supported and can perform SET/GET
- * for it
- */
+ /* bitmap of ena_admin_aq_feature_id */
uint32_t supported_features;
- /* word 3 : */
uint32_t reserved3;
- /* word 4 : Indicates how many bits are used physical address
- * access.
- */
+	/* Indicates how many bits are used for physical address access. */
uint32_t phys_addr_width;
- /* word 5 : Indicates how many bits are used virtual address access. */
+	/* Indicates how many bits are used for virtual address access. */
uint32_t virt_addr_width;
/* unicast MAC address (in Network byte order) */
@@ -629,36 +482,27 @@ struct ena_admin_device_attr_feature_desc {
uint8_t reserved7[2];
- /* word 8 : Max supported MTU value */
uint32_t max_mtu;
};
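
With supported_features documented as a bitmap of ena_admin_aq_feature_id, the intended test is a per-feature bit check. A hedged sketch (the helper name is illustrative):

/* Return non-zero when the device can SET/GET feature feat_id. */
static int
ena_feature_supported(const struct ena_admin_device_attr_feature_desc *p,
		      int feat_id)
{
	return !!(p->supported_features & (1U << feat_id));
}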
-/* ENA Max Queues Feature descriptor. */
struct ena_admin_queue_feature_desc {
- /* word 0 : Max number of submission queues (including LLQs) */
+ /* including LLQs */
uint32_t max_sq_num;
- /* word 1 : Max submission queue depth */
uint32_t max_sq_depth;
- /* word 2 : Max number of completion queues */
uint32_t max_cq_num;
- /* word 3 : Max completion queue depth */
uint32_t max_cq_depth;
- /* word 4 : Max number of LLQ submission queues */
uint32_t max_llq_num;
- /* word 5 : Max submission queue depth of LLQ */
uint32_t max_llq_depth;
- /* word 6 : Max header size */
uint32_t max_header_size;
- /* word 7 : */
- /* Maximum Descriptors number, including meta descriptors, allowed
- * for a single Tx packet
+	/* Maximum number of descriptors, including the meta descriptor,
+	 * allowed for a single Tx packet
*/
uint16_t max_packet_tx_descs;
@@ -666,86 +510,69 @@ struct ena_admin_queue_feature_desc {
uint16_t max_packet_rx_descs;
};
-/* ENA MTU Set Feature descriptor. */
struct ena_admin_set_feature_mtu_desc {
- /* word 0 : mtu payload size (exclude L2) */
+ /* exclude L2 */
uint32_t mtu;
};
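
Because mtu excludes the L2 header, a driver converts from the full frame size before issuing the set-feature command. A minimal sketch; ETHER_HDR_LEN and ETHER_CRC_LEN stand in for whatever L2 overhead the caller accounts for:

/* Sketch: the command wants the payload MTU, not the on-wire frame size. */
static void
ena_fill_mtu_cmd(struct ena_admin_set_feature_mtu_desc *desc,
		 uint32_t frame_size)
{
	desc->mtu = frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN;
}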
-/* ENA host attributes Set Feature descriptor. */
struct ena_admin_set_feature_host_attr_desc {
- /* words 0:1 : host OS info base address in OS memory. host info is
- * 4KB of physically contiguous
+	/* host OS info base address in OS memory. host info is 4KB of
+	 * physically contiguous memory
*/
struct ena_common_mem_addr os_info_ba;
- /* words 2:3 : host debug area base address in OS memory. debug
- * area must be physically contiguous
+ /* host debug area base address in OS memory. debug area must be
+ * physically contiguous
*/
struct ena_common_mem_addr debug_ba;
- /* word 4 : debug area size */
+ /* debug area size */
uint32_t debug_area_size;
};
-/* ENA Interrupt Moderation Get Feature descriptor. */
struct ena_admin_feature_intr_moder_desc {
- /* word 0 : */
/* interrupt delay granularity in usec */
uint16_t intr_delay_resolution;
uint16_t reserved;
};
-/* ENA Link Get Feature descriptor. */
struct ena_admin_get_feature_link_desc {
- /* word 0 : Link speed in Mb */
+ /* Link speed in Mb */
uint32_t speed;
- /* word 1 : supported speeds (bit field of enum ena_admin_link
- * types)
- */
+ /* bit field of enum ena_admin_link types */
uint32_t supported;
- /* word 2 : */
- /* 0 : autoneg - auto negotiation
+ /* 0 : autoneg
* 1 : duplex - Full Duplex
* 31:2 : reserved2
*/
uint32_t flags;
};
-/* ENA AENQ Feature descriptor. */
struct ena_admin_feature_aenq_desc {
- /* word 0 : bitmask for AENQ groups the device can report */
+ /* bitmask for AENQ groups the device can report */
uint32_t supported_groups;
- /* word 1 : bitmask for AENQ groups to report */
+ /* bitmask for AENQ groups to report */
uint32_t enabled_groups;
};
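
A driver typically masks the groups it wants against the groups the device advertises, so enabled_groups never requests something unsupported. A minimal sketch:

/* Sketch: enable only the AENQ groups the device supports. */
static void
ena_fill_aenq_groups(struct ena_admin_feature_aenq_desc *desc,
		     uint32_t requested_groups)
{
	desc->enabled_groups = requested_groups & desc->supported_groups;
}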
-/* ENA Stateless Offload Feature descriptor. */
struct ena_admin_feature_offload_desc {
- /* word 0 : */
- /* Trasmit side stateless offload
- * 0 : TX_L3_csum_ipv4 - IPv4 checksum
- * 1 : TX_L4_ipv4_csum_part - TCP/UDP over IPv4
- * checksum, the checksum field should be initialized
- * with pseudo header checksum
- * 2 : TX_L4_ipv4_csum_full - TCP/UDP over IPv4
- * checksum
- * 3 : TX_L4_ipv6_csum_part - TCP/UDP over IPv6
- * checksum, the checksum field should be initialized
- * with pseudo header checksum
- * 4 : TX_L4_ipv6_csum_full - TCP/UDP over IPv6
- * checksum
- * 5 : tso_ipv4 - TCP/IPv4 Segmentation Offloading
- * 6 : tso_ipv6 - TCP/IPv6 Segmentation Offloading
- * 7 : tso_ecn - TCP Segmentation with ECN
+ /* 0 : TX_L3_csum_ipv4
+ * 1 : TX_L4_ipv4_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 2 : TX_L4_ipv4_csum_full
+ * 3 : TX_L4_ipv6_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 4 : TX_L4_ipv6_csum_full
+ * 5 : tso_ipv4
+ * 6 : tso_ipv6
+ * 7 : tso_ecn
*/
uint32_t tx;
- /* word 1 : */
/* Receive side supported stateless offload
* 0 : RX_L3_csum_ipv4 - IPv4 checksum
* 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
@@ -754,118 +581,94 @@ struct ena_admin_feature_offload_desc {
*/
uint32_t rx_supported;
- /* word 2 : */
- /* Receive side enabled stateless offload */
uint32_t rx_enabled;
};
-/* hash functions */
enum ena_admin_hash_functions {
- /* Toeplitz hash */
- ENA_ADMIN_TOEPLITZ = 1,
+ ENA_ADMIN_TOEPLITZ = 1,
- /* CRC32 hash */
- ENA_ADMIN_CRC32 = 2,
+ ENA_ADMIN_CRC32 = 2,
};
-/* ENA RSS flow hash control buffer structure */
struct ena_admin_feature_rss_flow_hash_control {
- /* word 0 : number of valid keys */
uint32_t keys_num;
- /* word 1 : */
uint32_t reserved;
- /* Toeplitz keys */
uint32_t key[10];
};
-/* ENA RSS Flow Hash Function */
struct ena_admin_feature_rss_flow_hash_function {
- /* word 0 : */
- /* supported hash functions
- * 7:0 : funcs - supported hash functions (bitmask
- * accroding to ena_admin_hash_functions)
- */
+ /* 7:0 : funcs - bitmask of ena_admin_hash_functions */
uint32_t supported_func;
- /* word 1 : */
- /* selected hash func
- * 7:0 : selected_func - selected hash function
- * (bitmask accroding to ena_admin_hash_functions)
+ /* 7:0 : selected_func - bitmask of
+ * ena_admin_hash_functions
*/
uint32_t selected_func;
- /* word 2 : initial value */
+ /* initial value */
uint32_t init_val;
};
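
Since supported_func is a bitmask indexed by the ena_admin_hash_functions values, support for a given function is tested by shifting the enum value. A hedged sketch:

/* Sketch: non-zero when hash function 'func' is advertised as supported. */
static int
ena_hash_func_supported(const struct ena_admin_feature_rss_flow_hash_function *p,
			enum ena_admin_hash_functions func)
{
	return !!(p->supported_func & (1U << func));
}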
/* RSS flow hash protocols */
enum ena_admin_flow_hash_proto {
- /* tcp/ipv4 */
- ENA_ADMIN_RSS_TCP4 = 0,
+ ENA_ADMIN_RSS_TCP4 = 0,
- /* udp/ipv4 */
- ENA_ADMIN_RSS_UDP4 = 1,
+ ENA_ADMIN_RSS_UDP4 = 1,
- /* tcp/ipv6 */
- ENA_ADMIN_RSS_TCP6 = 2,
+ ENA_ADMIN_RSS_TCP6 = 2,
- /* udp/ipv6 */
- ENA_ADMIN_RSS_UDP6 = 3,
+ ENA_ADMIN_RSS_UDP6 = 3,
- /* ipv4 not tcp/udp */
- ENA_ADMIN_RSS_IP4 = 4,
+ ENA_ADMIN_RSS_IP4 = 4,
- /* ipv6 not tcp/udp */
- ENA_ADMIN_RSS_IP6 = 5,
+ ENA_ADMIN_RSS_IP6 = 5,
- /* fragmented ipv4 */
- ENA_ADMIN_RSS_IP4_FRAG = 6,
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
- /* not ipv4/6 */
- ENA_ADMIN_RSS_NOT_IP = 7,
+ ENA_ADMIN_RSS_NOT_IP = 7,
- /* max number of protocols */
- ENA_ADMIN_RSS_PROTO_NUM = 16,
+ /* TCPv6 with extension header */
+ ENA_ADMIN_RSS_TCP6_EX = 8,
+
+ /* IPv6 with extension header */
+ ENA_ADMIN_RSS_IP6_EX = 9,
+
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
};
/* RSS flow hash fields */
enum ena_admin_flow_hash_fields {
/* Ethernet Dest Addr */
- ENA_ADMIN_RSS_L2_DA = 0,
+ ENA_ADMIN_RSS_L2_DA = BIT(0),
/* Ethernet Src Addr */
- ENA_ADMIN_RSS_L2_SA = 1,
+ ENA_ADMIN_RSS_L2_SA = BIT(1),
/* ipv4/6 Dest Addr */
- ENA_ADMIN_RSS_L3_DA = 2,
+ ENA_ADMIN_RSS_L3_DA = BIT(2),
/* ipv4/6 Src Addr */
- ENA_ADMIN_RSS_L3_SA = 5,
+ ENA_ADMIN_RSS_L3_SA = BIT(3),
/* tcp/udp Dest Port */
- ENA_ADMIN_RSS_L4_DP = 6,
+ ENA_ADMIN_RSS_L4_DP = BIT(4),
/* tcp/udp Src Port */
- ENA_ADMIN_RSS_L4_SP = 7,
+ ENA_ADMIN_RSS_L4_SP = BIT(5),
};
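
The enumerators now carry BIT() flag values rather than bit indices, so field sets are built by OR-ing them directly. For example, a conventional 4-tuple hash for TCP flows could be expressed as:

/* Sketch: hash on the classic 4-tuple (L3 and L4 source/destination). */
uint16_t tcp_4tuple_fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
			     ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP;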
-/* hash input fields for flow protocol */
struct ena_admin_proto_input {
- /* word 0 : */
/* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
uint16_t fields;
uint16_t reserved2;
};
-/* ENA RSS hash control buffer structure */
struct ena_admin_feature_rss_hash_control {
- /* supported input fields */
struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
- /* selected input fields */
struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];
@@ -873,11 +676,9 @@ struct ena_admin_feature_rss_hash_control {
struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];
};
-/* ENA RSS flow hash input */
struct ena_admin_feature_rss_flow_hash_input {
- /* word 0 : */
/* supported hash input sorting
- * 1 : L3_sort - support swap L3 addresses if DA
+ * 1 : L3_sort - support swap L3 addresses if DA is
* smaller than SA
	 * 2 : L4_sort - support swap L4 ports if DP is
	 * smaller than SP
@@ -893,46 +694,37 @@ struct ena_admin_feature_rss_flow_hash_input {
uint16_t enabled_input_sort;
};
-/* Operating system type */
enum ena_admin_os_type {
- /* Linux OS */
- ENA_ADMIN_OS_LINUX = 1,
+ ENA_ADMIN_OS_LINUX = 1,
- /* Windows OS */
- ENA_ADMIN_OS_WIN = 2,
+ ENA_ADMIN_OS_WIN = 2,
- /* DPDK OS */
- ENA_ADMIN_OS_DPDK = 3,
+ ENA_ADMIN_OS_DPDK = 3,
- /* FreeBSD OS */
- ENA_ADMIN_OS_FREEBSD = 4,
+ ENA_ADMIN_OS_FREEBSD = 4,
- /* PXE OS */
- ENA_ADMIN_OS_IPXE = 5,
+ ENA_ADMIN_OS_IPXE = 5,
};
-/* host info */
struct ena_admin_host_info {
- /* word 0 : OS type defined in enum ena_os_type */
+ /* defined in enum ena_admin_os_type */
uint32_t os_type;
/* os distribution string format */
uint8_t os_dist_str[128];
- /* word 33 : OS distribution numeric format */
+ /* OS distribution numeric format */
uint32_t os_dist;
/* kernel version string format */
uint8_t kernel_ver_str[32];
- /* word 42 : Kernel version numeric format */
+ /* Kernel version numeric format */
uint32_t kernel_ver;
- /* word 43 : */
- /* driver version
- * 7:0 : major - major
- * 15:8 : minor - minor
- * 23:16 : sub_minor - sub minor
+ /* 7:0 : major
+ * 15:8 : minor
+ * 23:16 : sub_minor
*/
uint32_t driver_version;
@@ -940,220 +732,200 @@ struct ena_admin_host_info {
uint32_t supported_network_features[4];
};
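
driver_version packs major, minor and sub-minor into one word; the set_ena_admin_host_info_* helpers defined later in this file do the shifting. A minimal usage sketch (the helpers OR bits in, so the field must start from zero):

/* Sketch: encode driver version 2.0.1 into a zeroed host_info. */
host_info->driver_version = 0;
set_ena_admin_host_info_major(host_info, 2);
set_ena_admin_host_info_minor(host_info, 0);
set_ena_admin_host_info_sub_minor(host_info, 1);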
-/* ENA RSS indirection table entry */
struct ena_admin_rss_ind_table_entry {
- /* word 0 : */
- /* cq identifier */
uint16_t cq_idx;
uint16_t reserved;
};
-/* ENA RSS indirection table */
struct ena_admin_feature_rss_ind_table {
- /* word 0 : */
/* min supported table size (2^min_size) */
uint16_t min_size;
/* max supported table size (2^max_size) */
uint16_t max_size;
- /* word 1 : */
/* table size (2^size) */
uint16_t size;
uint16_t reserved;
- /* word 2 : index of the inline entry. 0xFFFFFFFF means invalid */
+ /* index of the inline entry. 0xFFFFFFFF means invalid */
uint32_t inline_index;
- /* words 3 : used for updating single entry, ignored when setting
- * the entire table through the control buffer.
+ /* used for updating single entry, ignored when setting the entire
+ * table through the control buffer.
*/
struct ena_admin_rss_ind_table_entry inline_entry;
};
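
The size fields are log2 values, so the usable entry count is a power of two. A one-line sketch:

/* Sketch: entries implied by 'size'; bounded above by 1U << max_size. */
uint32_t num_entries = 1U << ind_table->size;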
-/* ENA Get Feature command */
+/* When hint value is 0, driver should use its own predefined value */
+struct ena_admin_ena_hw_hints {
+ /* value in ms */
+ uint16_t mmio_read_timeout;
+
+ /* value in ms */
+ uint16_t driver_watchdog_timeout;
+
+ /* Per packet tx completion timeout. value in ms */
+ uint16_t missing_tx_completion_timeout;
+
+ uint16_t missed_tx_completion_count_threshold_to_reset;
+
+ /* value in ms */
+ uint16_t admin_completion_tx_timeout;
+
+ uint16_t netdev_wd_timeout;
+
+ uint16_t max_tx_sgl_size;
+
+ uint16_t max_rx_sgl_size;
+
+ uint16_t reserved[8];
+};
+
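
Per the struct comment, a zero hint means "keep the driver default", so consumers test each hint before overriding their own value. A minimal sketch; ENA_MMIO_READ_TIMEOUT_DEFAULT_MS is an assumed driver constant, not part of this header:

/* Sketch: honor a device hint only when it is non-zero. */
if (hints->mmio_read_timeout != 0)
	mmio_read_timeout_ms = hints->mmio_read_timeout;
else
	mmio_read_timeout_ms = ENA_MMIO_READ_TIMEOUT_DEFAULT_MS;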
struct ena_admin_get_feat_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : points to control buffer (direct or indirect,
- * chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
- /* words 4 : */
struct ena_admin_get_set_feature_common_desc feat_common;
- /* words 5:15 : */
- union {
- /* raw words */
- uint32_t raw[11];
- } u;
+ uint32_t raw[11];
};
-/* ENA Get Feature command response */
struct ena_admin_get_feat_resp {
- /* words 0:1 : */
struct ena_admin_acq_common_desc acq_common_desc;
- /* words 2:15 : */
union {
- /* raw words */
uint32_t raw[14];
- /* words 2:10 : Get Device Attributes */
struct ena_admin_device_attr_feature_desc dev_attr;
- /* words 2:5 : Max queues num */
struct ena_admin_queue_feature_desc max_queue;
- /* words 2:3 : AENQ configuration */
struct ena_admin_feature_aenq_desc aenq;
- /* words 2:4 : Get Link configuration */
struct ena_admin_get_feature_link_desc link;
- /* words 2:4 : offload configuration */
struct ena_admin_feature_offload_desc offload;
- /* words 2:4 : rss flow hash function */
struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
- /* words 2 : rss flow hash input */
struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
- /* words 2:3 : rss indirection table */
struct ena_admin_feature_rss_ind_table ind_table;
- /* words 2 : interrupt moderation configuration */
struct ena_admin_feature_intr_moder_desc intr_moderation;
+
+ struct ena_admin_ena_hw_hints hw_hints;
} u;
};
-/* ENA Set Feature command */
struct ena_admin_set_feat_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : points to control buffer (direct or indirect,
- * chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
- /* words 4 : */
struct ena_admin_get_set_feature_common_desc feat_common;
- /* words 5:15 : */
union {
- /* raw words */
uint32_t raw[11];
- /* words 5 : mtu size */
+ /* mtu size */
struct ena_admin_set_feature_mtu_desc mtu;
- /* words 5:7 : host attributes */
+ /* host attributes */
struct ena_admin_set_feature_host_attr_desc host_attr;
- /* words 5:6 : AENQ configuration */
+ /* AENQ configuration */
struct ena_admin_feature_aenq_desc aenq;
- /* words 5:7 : rss flow hash function */
+ /* rss flow hash function */
struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
- /* words 5 : rss flow hash input */
+ /* rss flow hash input */
struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
- /* words 5:6 : rss indirection table */
+ /* rss indirection table */
struct ena_admin_feature_rss_ind_table ind_table;
} u;
};
-/* ENA Set Feature command response */
struct ena_admin_set_feat_resp {
- /* words 0:1 : */
struct ena_admin_acq_common_desc acq_common_desc;
- /* words 2:15 : */
union {
- /* raw words */
uint32_t raw[14];
} u;
};
-/* ENA Asynchronous Event Notification Queue descriptor. */
struct ena_admin_aenq_common_desc {
- /* word 0 : */
uint16_t group;
uint16_t syndrom;
- /* word 1 : */
/* 0 : phase */
uint8_t flags;
uint8_t reserved1[3];
- /* word 2 : Timestamp LSB */
uint32_t timestamp_low;
- /* word 3 : Timestamp MSB */
uint32_t timestamp_high;
};
/* asynchronous event notification groups */
enum ena_admin_aenq_group {
- /* Link State Change */
- ENA_ADMIN_LINK_CHANGE = 0,
+ ENA_ADMIN_LINK_CHANGE = 0,
- ENA_ADMIN_FATAL_ERROR = 1,
+ ENA_ADMIN_FATAL_ERROR = 1,
- ENA_ADMIN_WARNING = 2,
+ ENA_ADMIN_WARNING = 2,
- ENA_ADMIN_NOTIFICATION = 3,
+ ENA_ADMIN_NOTIFICATION = 3,
- ENA_ADMIN_KEEP_ALIVE = 4,
+ ENA_ADMIN_KEEP_ALIVE = 4,
- ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
};
-/* syndorm of AENQ notification group */
enum ena_admin_aenq_notification_syndrom {
- ENA_ADMIN_SUSPEND = 0,
+ ENA_ADMIN_SUSPEND = 0,
+
+ ENA_ADMIN_RESUME = 1,
- ENA_ADMIN_RESUME = 1,
+ ENA_ADMIN_UPDATE_HINTS = 2,
};
-/* ENA Asynchronous Event Notification generic descriptor. */
struct ena_admin_aenq_entry {
- /* words 0:3 : */
struct ena_admin_aenq_common_desc aenq_common_desc;
/* command specific inline data */
uint32_t inline_data_w4[12];
};
-/* ENA Asynchronous Event Notification Queue Link Change descriptor. */
struct ena_admin_aenq_link_change_desc {
- /* words 0:3 : */
struct ena_admin_aenq_common_desc aenq_common_desc;
- /* word 4 : */
/* 0 : link_status */
uint32_t flags;
};
-/* ENA MMIO Readless response interface */
+struct ena_admin_aenq_keep_alive_desc {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ uint32_t rx_drops_low;
+
+ uint32_t rx_drops_high;
+};
+
struct ena_admin_ena_mmio_req_read_less_resp {
- /* word 0 : */
- /* request id */
uint16_t req_id;
- /* register offset */
uint16_t reg_off;
- /* word 1 : value is valid when poll is cleared */
+ /* value is valid when poll is cleared */
uint32_t reg_val;
};
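
The readless interface replaces a direct register read: the driver posts a {req_id, reg_off} request to the device and then spins on this response structure in host memory. A hedged sketch of the polling side (the real code bounds the loop with a timeout):

/* Sketch: wait for the device to complete readless request 'seq'.
 * 'resp' lives in DMA-able host memory shared with the device.
 */
static uint32_t
ena_mmio_readless_wait(volatile struct ena_admin_ena_mmio_req_read_less_resp *resp,
		       uint16_t seq)
{
	while (resp->req_id != seq)
		;	/* a production driver adds a timeout here */
	return resp->reg_val;
}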
@@ -1220,8 +992,7 @@ struct ena_admin_ena_mmio_req_read_less_resp {
/* feature_rss_flow_hash_function */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
-#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK \
- GENMASK(7, 0)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0)
/* feature_rss_flow_hash_input */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
@@ -1247,653 +1018,392 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
#if !defined(ENA_DEFS_LINUX_MAINLINE)
-static inline uint16_t
-get_ena_admin_aq_common_desc_command_id(
- const struct ena_admin_aq_common_desc *p)
+static inline uint16_t get_ena_admin_aq_common_desc_command_id(const struct ena_admin_aq_common_desc *p)
{
return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline void
-set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p,
- uint16_t val)
+static inline void set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, uint16_t val)
{
p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
+static inline uint8_t get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
{
return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
}
-static inline void
-set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_common_desc_ctrl_data(
- const struct ena_admin_aq_common_desc *p)
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data(const struct ena_admin_aq_common_desc *p)
{
- return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >>
- ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
}
-static inline void
-set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, uint8_t val)
{
- p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT)
- & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_common_desc_ctrl_data_indirect(
- const struct ena_admin_aq_common_desc *p)
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data_indirect(const struct ena_admin_aq_common_desc *p)
{
- return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK)
- >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
}
-static inline void
-set_ena_admin_aq_common_desc_ctrl_data_indirect(
- struct ena_admin_aq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aq_common_desc_ctrl_data_indirect(struct ena_admin_aq_common_desc *p, uint8_t val)
{
- p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT)
- & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
}
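
All of these accessors follow one pattern: getters mask (and shift) the packed word, setters OR the shifted value in. Because the setters never clear the field first, callers are expected to start from a zeroed descriptor. A short usage sketch (cmd_id is whatever identifier the caller assigns):

/* Sketch: populate a fresh AQ descriptor with the bitfield setters.
 * memset first -- the set_* helpers OR bits in and do not clear.
 */
struct ena_admin_aq_common_desc desc;

memset(&desc, 0, sizeof(desc));
set_ena_admin_aq_common_desc_command_id(&desc, cmd_id);
set_ena_admin_aq_common_desc_phase(&desc, 1);
set_ena_admin_aq_common_desc_ctrl_data_indirect(&desc, 1);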
-static inline uint8_t
-get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
+static inline uint8_t get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
{
- return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK)
- >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
+ return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
}
-static inline void
-set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
+static inline void set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
{
- p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
- ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+ p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
}
-static inline uint16_t
-get_ena_admin_acq_common_desc_command_id(
- const struct ena_admin_acq_common_desc *p)
+static inline uint16_t get_ena_admin_acq_common_desc_command_id(const struct ena_admin_acq_common_desc *p)
{
return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline void
-set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p,
- uint16_t val)
+static inline void set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, uint16_t val)
{
p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline uint8_t
-get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
+static inline uint8_t get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
{
return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
}
-static inline void
-set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_sq_direction(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_sq_direction(const struct ena_admin_aq_create_sq_cmd *p)
{
- return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK)
- >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
+ return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_sq_direction(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_sq_direction(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
- p->sq_identity |= (val <<
- ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT)
- & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+ p->sq_identity |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_placement_policy(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_placement_policy(const struct ena_admin_aq_create_sq_cmd *p)
{
return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_placement_policy(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_placement_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_completion_policy(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_completion_policy(const struct ena_admin_aq_create_sq_cmd *p)
{
- return (p->sq_caps_2
- & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK)
- >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
+ return (p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_completion_policy(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_completion_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
- p->sq_caps_2 |=
- (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT)
- & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+ p->sq_caps_2 |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(const struct ena_admin_aq_create_sq_cmd *p)
{
- return p->sq_caps_3 &
- ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+ return p->sq_caps_3 & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
- p->sq_caps_3 |= val &
- ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+ p->sq_caps_3 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(
- const struct ena_admin_aq_create_cq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(const struct ena_admin_aq_create_cq_cmd *p)
{
- return (p->cq_caps_1 &
- ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK)
- >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
+ return (p->cq_caps_1 & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
}
-static inline void
-set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(
- struct ena_admin_aq_create_cq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
{
- p->cq_caps_1 |=
- (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT)
- & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+ p->cq_caps_1 |= (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(
- const struct ena_admin_aq_create_cq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(const struct ena_admin_aq_create_cq_cmd *p)
{
- return p->cq_caps_2
- & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ return p->cq_caps_2 & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
}
-static inline void
-set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(
- struct ena_admin_aq_create_cq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
{
- p->cq_caps_2 |=
- val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ p->cq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
}
-static inline uint8_t
-get_ena_admin_get_set_feature_common_desc_select(
- const struct ena_admin_get_set_feature_common_desc *p)
+static inline uint8_t get_ena_admin_get_set_feature_common_desc_select(const struct ena_admin_get_set_feature_common_desc *p)
{
return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
}
-static inline void
-set_ena_admin_get_set_feature_common_desc_select(
- struct ena_admin_get_set_feature_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_get_set_feature_common_desc_select(struct ena_admin_get_set_feature_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
}
-static inline uint32_t
-get_ena_admin_get_feature_link_desc_autoneg(
- const struct ena_admin_get_feature_link_desc *p)
+static inline uint32_t get_ena_admin_get_feature_link_desc_autoneg(const struct ena_admin_get_feature_link_desc *p)
{
return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
}
-static inline void
-set_ena_admin_get_feature_link_desc_autoneg(
- struct ena_admin_get_feature_link_desc *p,
- uint32_t val)
+static inline void set_ena_admin_get_feature_link_desc_autoneg(struct ena_admin_get_feature_link_desc *p, uint32_t val)
{
p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
}
-static inline uint32_t
-get_ena_admin_get_feature_link_desc_duplex(
- const struct ena_admin_get_feature_link_desc *p)
+static inline uint32_t get_ena_admin_get_feature_link_desc_duplex(const struct ena_admin_get_feature_link_desc *p)
{
- return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK)
- >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
+ return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
}
-static inline void
-set_ena_admin_get_feature_link_desc_duplex(
- struct ena_admin_get_feature_link_desc *p,
- uint32_t val)
+static inline void set_ena_admin_get_feature_link_desc_duplex(struct ena_admin_get_feature_link_desc *p, uint32_t val)
{
- p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT)
- & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
+ p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
{
return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_tso_ipv4(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv4(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_tso_ipv4(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_tso_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_tso_ipv6(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv6(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_tso_ipv6(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_tso_ipv6(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_tso_ecn(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ecn(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_tso_ecn(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_tso_ecn(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
{
- return p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+ return p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+ p->rx_supported |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(const struct ena_admin_feature_offload_desc *p)
{
- return (p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(const struct ena_admin_feature_offload_desc *p)
{
- return (p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_hash(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_hash(const struct ena_admin_feature_offload_desc *p)
{
- return (p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_hash(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_hash(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_rss_flow_hash_function_funcs(
- const struct ena_admin_feature_rss_flow_hash_function *p)
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_funcs(const struct ena_admin_feature_rss_flow_hash_function *p)
{
- return p->supported_func &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+ return p->supported_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_function_funcs(
- struct ena_admin_feature_rss_flow_hash_function *p,
- uint32_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_function_funcs(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
{
- p->supported_func |=
- val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+ p->supported_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_rss_flow_hash_function_selected_func(
- const struct ena_admin_feature_rss_flow_hash_function *p)
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_selected_func(const struct ena_admin_feature_rss_flow_hash_function *p)
{
- return p->selected_func &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+ return p->selected_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_function_selected_func(
- struct ena_admin_feature_rss_flow_hash_function *p,
- uint32_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_function_selected_func(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
{
- p->selected_func |=
- val &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+ p->selected_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_L3_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->supported_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
+ return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_L3_sort(
- struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->supported_input_sort |=
- (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
+ p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_L4_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->supported_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
+ return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_L4_sort(
- struct ena_admin_feature_rss_flow_hash_input *p,
- uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->supported_input_sort |=
- (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+ p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->enabled_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
+ return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(
- struct ena_admin_feature_rss_flow_hash_input *p,
- uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->enabled_input_sort |=
- (val <<
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
+ p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->enabled_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
+ return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(
- struct ena_admin_feature_rss_flow_hash_input *p,
- uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->enabled_input_sort |=
- (val <<
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
+ p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
}
-static inline uint32_t
-get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
+static inline uint32_t get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
{
return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
}
-static inline void
-set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
+static inline void set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
{
p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
}
-static inline uint32_t
-get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
+static inline uint32_t get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
{
- return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK)
- >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
}
-static inline void
-set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
+static inline void set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
{
- p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT)
- & ENA_ADMIN_HOST_INFO_MINOR_MASK;
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_MINOR_MASK;
}
-static inline uint32_t
-get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
+static inline uint32_t get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
{
- return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK)
- >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
}
-static inline void
-set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
+static inline void set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
{
- p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT)
- & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
}
-static inline uint8_t
-get_ena_admin_aenq_common_desc_phase(
- const struct ena_admin_aenq_common_desc *p)
+static inline uint8_t get_ena_admin_aenq_common_desc_phase(const struct ena_admin_aenq_common_desc *p)
{
return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
}
-static inline void
-set_ena_admin_aenq_common_desc_phase(
- struct ena_admin_aenq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aenq_common_desc_phase(struct ena_admin_aenq_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
}
-static inline uint32_t
-get_ena_admin_aenq_link_change_desc_link_status(
- const struct ena_admin_aenq_link_change_desc *p)
+static inline uint32_t get_ena_admin_aenq_link_change_desc_link_status(const struct ena_admin_aenq_link_change_desc *p)
{
return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
}
-static inline void
-set_ena_admin_aenq_link_change_desc_link_status(
- struct ena_admin_aenq_link_change_desc *p,
- uint32_t val)
+static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_admin_aenq_link_change_desc *p, uint32_t val)
{
p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
}
diff --git a/drivers/net/ena/base/ena_defs/ena_common_defs.h b/drivers/net/ena/base/ena_defs/ena_common_defs.h
index 95e0f389..072e6c1f 100644
--- a/drivers/net/ena/base/ena_defs/ena_common_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_common_defs.h
@@ -34,17 +34,13 @@
#ifndef _ENA_COMMON_H_
#define _ENA_COMMON_H_
-/* spec version */
-#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* spec version major */
-#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* spec version minor */
+#define ENA_COMMON_SPEC_VERSION_MAJOR	0
+#define ENA_COMMON_SPEC_VERSION_MINOR	10
/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
struct ena_common_mem_addr {
- /* word 0 : low 32 bit of the memory address */
uint32_t mem_addr_low;
- /* word 1 : */
- /* high 16 bits of the memory address */
uint16_t mem_addr_high;
/* MBZ */
diff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
index 6bc3d6a7..4cf0b205 100644
--- a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
@@ -34,35 +34,30 @@
#ifndef _ENA_ETH_IO_H_
#define _ENA_ETH_IO_H_
-/* Layer 3 protocol index */
enum ena_eth_io_l3_proto_index {
- ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
- ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
- ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
- ENA_ETH_IO_L3_PROTO_FCOE = 21,
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
- ENA_ETH_IO_L3_PROTO_ROCE = 22,
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
};
-/* Layer 4 protocol index */
enum ena_eth_io_l4_proto_index {
- ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
- ENA_ETH_IO_L4_PROTO_TCP = 12,
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
- ENA_ETH_IO_L4_PROTO_UDP = 13,
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
- ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
};
-/* ENA IO Queue Tx descriptor */
struct ena_eth_io_tx_desc {
- /* word 0 : */
- /* length, request id and control flags
- * 15:0 : length - Buffer length in bytes, must
+ /* 15:0 : length - Buffer length in bytes, must
	 * include any packet trailers that the ENA is supposed
* to update like End-to-End CRC, Authentication GMAC
* etc. This length must not include the
@@ -85,9 +80,7 @@ struct ena_eth_io_tx_desc {
*/
uint32_t len_ctrl;
- /* word 1 : */
- /* ethernet control
- * 3:0 : l3_proto_idx - L3 protocol. This field
+ /* 3:0 : l3_proto_idx - L3 protocol. This field
* required when l3_csum_en,l3_csum or tso_en are set.
* 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and
* DF flags of the IPv4 header is 0. Otherwise must
@@ -119,10 +112,8 @@ struct ena_eth_io_tx_desc {
*/
uint32_t meta_ctrl;
- /* word 2 : Buffer address bits[31:0] */
uint32_t buff_addr_lo;
- /* word 3 : */
/* address high and header size
* 15:0 : addr_hi - Buffer Pointer[47:32]
* 23:16 : reserved16_w2
@@ -141,20 +132,16 @@ struct ena_eth_io_tx_desc {
uint32_t buff_addr_hi_hdr_sz;
};
-/* ENA IO Queue Tx Meta descriptor */
struct ena_eth_io_tx_meta_desc {
- /* word 0 : */
- /* length, request id and control flags
- * 9:0 : req_id_lo - Request ID[9:0]
+ /* 9:0 : req_id_lo - Request ID[9:0]
* 11:10 : reserved10 - MBZ
* 12 : reserved12 - MBZ
* 13 : reserved13 - MBZ
* 14 : ext_valid - if set, offset fields in Word2
- * are valid Also MSS High in Word 0 and Outer L3
- * Offset High in WORD 0 and bits [31:24] in Word 3
- * 15 : word3_valid - If set Crypto Info[23:0] of
- * Word 3 is valid
- * 19:16 : mss_hi_ptp
+ * are valid Also MSS High in Word 0 and bits [31:24]
+ * in Word 3
+ * 15 : reserved15
+ * 19:16 : mss_hi
* 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
* Extended Metadata Descriptor
* 21 : meta_store - Store extended metadata in queue
@@ -175,19 +162,13 @@ struct ena_eth_io_tx_meta_desc {
*/
uint32_t len_ctrl;
- /* word 1 : */
- /* word 1
- * 5:0 : req_id_hi
+ /* 5:0 : req_id_hi
* 31:6 : reserved6 - MBZ
*/
uint32_t word1;
- /* word 2 : */
- /* word 2
- * 7:0 : l3_hdr_len - the header length L3 IP header.
- * 15:8 : l3_hdr_off - the offset of the first byte
- * in the L3 header from the beginning of the to-be
- * transmitted packet.
+ /* 7:0 : l3_hdr_len
+ * 15:8 : l3_hdr_off
* 21:16 : l4_hdr_len_in_words - counts the L4 header
* length in words. there is an explicit assumption
* that L4 header appears right after L3 header and
@@ -196,13 +177,10 @@ struct ena_eth_io_tx_meta_desc {
*/
uint32_t word2;
- /* word 3 : */
uint32_t reserved;
};
-/* ENA IO Queue Tx completions descriptor */
struct ena_eth_io_tx_cdesc {
- /* word 0 : */
/* Request ID[15:0] */
uint16_t req_id;
@@ -214,24 +192,19 @@ struct ena_eth_io_tx_cdesc {
*/
uint8_t flags;
- /* word 1 : */
uint16_t sub_qid;
- /* indicates location of submission queue head */
uint16_t sq_head_idx;
};
-/* ENA IO Queue Rx descriptor */
struct ena_eth_io_rx_desc {
- /* word 0 : */
/* In bytes. 0 means 64KB */
uint16_t length;
/* MBZ */
uint8_t reserved2;
- /* control flags
- * 0 : phase
+ /* 0 : phase
* 1 : reserved1 - MBZ
* 2 : first - Indicates first descriptor in
* transaction
@@ -242,32 +215,27 @@ struct ena_eth_io_rx_desc {
*/
uint8_t ctrl;
- /* word 1 : */
uint16_t req_id;
/* MBZ */
uint16_t reserved6;
- /* word 2 : Buffer address bits[31:0] */
uint32_t buff_addr_lo;
- /* word 3 : */
- /* Buffer Address bits[47:16] */
uint16_t buff_addr_hi;
/* MBZ */
uint16_t reserved16_w3;
};
-/* ENA IO Queue Rx Completion Base Descriptor (4-word format). Note: all
- * ethernet parsing information are valid only when last=1
+/* 4-word format Note: all ethernet parsing information are valid only when
+ * last=1
*/
struct ena_eth_io_rx_cdesc_base {
- /* word 0 : */
- /* 4:0 : l3_proto_idx - L3 protocol index
- * 6:5 : src_vlan_cnt - Source VLAN count
+ /* 4:0 : l3_proto_idx
+ * 6:5 : src_vlan_cnt
* 7 : reserved7 - MBZ
- * 12:8 : l4_proto_idx - L4 protocol index
+ * 12:8 : l4_proto_idx
* 13 : l3_csum_err - when set, either the L3
* checksum error detected, or, the controller didn't
* validate the checksum. This bit is valid only when
@@ -292,56 +260,43 @@ struct ena_eth_io_rx_cdesc_base {
*/
uint32_t status;
- /* word 1 : */
uint16_t length;
uint16_t req_id;
- /* word 2 : 32-bit hash result */
+ /* 32-bit hash result */
uint32_t hash;
- /* word 3 : */
- /* submission queue number */
uint16_t sub_qid;
uint16_t reserved;
};
-/* ENA IO Queue Rx Completion Descriptor (8-word format) */
+/* 8-word format */
struct ena_eth_io_rx_cdesc_ext {
- /* words 0:3 : Rx Completion Extended */
struct ena_eth_io_rx_cdesc_base base;
- /* word 4 : Completed Buffer address bits[31:0] */
uint32_t buff_addr_lo;
- /* word 5 : */
- /* the buffer address used bits[47:32] */
uint16_t buff_addr_hi;
uint16_t reserved16;
- /* word 6 : Reserved */
uint32_t reserved_w6;
- /* word 7 : Reserved */
uint32_t reserved_w7;
};
-/* ENA Interrupt Unmask Register */
struct ena_eth_io_intr_reg {
- /* word 0 : */
- /* 14:0 : rx_intr_delay - rx interrupt delay value
- * 29:15 : tx_intr_delay - tx interrupt delay value
- * 30 : intr_unmask - if set, unmasks interrupt
+ /* 14:0 : rx_intr_delay
+ * 29:15 : tx_intr_delay
+ * 30 : intr_unmask
* 31 : reserved
*/
uint32_t intr_control;
};
-/* ENA NUMA Node configuration register */
struct ena_eth_io_numa_node_cfg_reg {
- /* word 0 : */
/* 7:0 : numa
* 30:8 : reserved
* 31 : enabled
@@ -388,10 +343,8 @@ struct ena_eth_io_numa_node_cfg_reg {
#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
-#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT 15
-#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK BIT(15)
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT 16
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
@@ -463,803 +416,544 @@ struct ena_eth_io_numa_node_cfg_reg {
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
#if !defined(ENA_DEFS_LINUX_MAINLINE)
-static inline uint32_t get_ena_eth_io_tx_desc_length(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_length(const struct ena_eth_io_tx_desc *p)
{
return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
}
-static inline void set_ena_eth_io_tx_desc_length(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_length(struct ena_eth_io_tx_desc *p, uint32_t val)
{
p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK)
- >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_req_id_hi(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_req_id_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT)
- & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK)
- >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_meta_desc(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_meta_desc(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT)
- & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_phase(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_phase(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK)
- >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_phase(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_phase(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT)
- & ENA_ETH_IO_TX_DESC_PHASE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_first(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_first(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK)
- >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_first(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_first(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT)
- & ENA_ETH_IO_TX_DESC_FIRST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_DESC_FIRST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_last(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_last(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK)
- >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK) >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_last(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_last(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT)
- & ENA_ETH_IO_TX_DESC_LAST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_DESC_LAST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_comp_req(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_comp_req(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK)
- >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_comp_req(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_comp_req(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT)
- & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(const struct ena_eth_io_tx_desc *p)
{
return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
}
-static inline void set_ena_eth_io_tx_desc_l3_proto_idx(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l3_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
{
p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_DF(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_DF(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK)
- >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK) >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_DF(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_DF(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_DF_SHIFT)
- & ENA_ETH_IO_TX_DESC_DF_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_DF_SHIFT) & ENA_ETH_IO_TX_DESC_DF_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_tso_en(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_tso_en(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK)
- >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK) >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_tso_en(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_tso_en(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT)
- & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK)
- >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l4_proto_idx(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l4_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT)
- & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK)
- >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l3_csum_en(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l3_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT)
- & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK)
- >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l4_csum_en(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l4_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT)
- & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK)
- >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK) >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT)
- & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT) & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK)
- >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l4_csum_partial(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l4_csum_partial(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT)
- & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK)
- >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_req_id_lo(
- struct ena_eth_io_tx_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_desc_req_id_lo(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT)
- & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(const struct ena_eth_io_tx_desc *p)
{
return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
}
-static inline void set_ena_eth_io_tx_desc_addr_hi(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_addr_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
{
p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_header_length(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_header_length(const struct ena_eth_io_tx_desc *p)
{
- return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK)
- >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
+ return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK) >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_header_length(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_header_length(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->buff_addr_hi_hdr_sz |=
- (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT)
- & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+ p->buff_addr_hi_hdr_sz |= (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(const struct ena_eth_io_tx_meta_desc *p)
{
return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
}
-static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(
- const struct ena_eth_io_tx_meta_desc *p)
-{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK)
- >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
-}
-
-static inline void set_ena_eth_io_tx_meta_desc_ext_valid(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
-{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
-}
-
-static inline uint32_t get_ena_eth_io_tx_meta_desc_word3_valid(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK)
- >> ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK) >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_word3_valid(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_ext_valid(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi_ptp(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK)
- >> ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_mss_hi_ptp(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_mss_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK)
- >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK) >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK)
- >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK) >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_meta_store(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_meta_store(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK)
- >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_meta_desc(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_meta_desc(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK)
- >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_phase(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_phase(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_first(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_first(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK)
- >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_first(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_first(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_last(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_last(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK)
- >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK) >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_last(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_last(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK)
- >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_comp_req(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_comp_req(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(const struct ena_eth_io_tx_meta_desc *p)
{
return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
}
-static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(const struct ena_eth_io_tx_meta_desc *p)
{
return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
}
-static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK)
- >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK) >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->word2 |=
- (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK)
- >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK) >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->word2 |=
- (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK)
- >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_mss_lo(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_mss_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->word2 |=
- (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
}
-static inline uint8_t get_ena_eth_io_tx_cdesc_phase(
- const struct ena_eth_io_tx_cdesc *p)
+static inline uint8_t get_ena_eth_io_tx_cdesc_phase(const struct ena_eth_io_tx_cdesc *p)
{
return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
}
-static inline void set_ena_eth_io_tx_cdesc_phase(
- struct ena_eth_io_tx_cdesc *p,
- uint8_t val)
+static inline void set_ena_eth_io_tx_cdesc_phase(struct ena_eth_io_tx_cdesc *p, uint8_t val)
{
p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_phase(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_phase(const struct ena_eth_io_rx_desc *p)
{
return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK;
}
-static inline void set_ena_eth_io_rx_desc_phase(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_phase(struct ena_eth_io_rx_desc *p, uint8_t val)
{
p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_first(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_first(const struct ena_eth_io_rx_desc *p)
{
- return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK)
- >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK) >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_rx_desc_first(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_first(struct ena_eth_io_rx_desc *p, uint8_t val)
{
- p->ctrl |=
- (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT)
- & ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT) & ENA_ETH_IO_RX_DESC_FIRST_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_last(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_last(const struct ena_eth_io_rx_desc *p)
{
- return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK)
- >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK) >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
}
-static inline void set_ena_eth_io_rx_desc_last(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_last(struct ena_eth_io_rx_desc *p, uint8_t val)
{
- p->ctrl |=
- (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT)
- & ENA_ETH_IO_RX_DESC_LAST_MASK;
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT) & ENA_ETH_IO_RX_DESC_LAST_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_comp_req(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_comp_req(const struct ena_eth_io_rx_desc *p)
{
- return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK)
- >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
}
-static inline void set_ena_eth_io_rx_desc_comp_req(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_comp_req(struct ena_eth_io_rx_desc *p, uint8_t val)
{
- p->ctrl |=
- (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT)
- & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
{
return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(
- struct ena_eth_io_rx_cdesc_base *p,
- uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(
- struct ena_eth_io_rx_cdesc_base *p,
- uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |=
- (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_phase(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_phase(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_first(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_first(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_last(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_last(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_buffer(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_buffer(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
}
-static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(
- const struct ena_eth_io_intr_reg *p)
+static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(const struct ena_eth_io_intr_reg *p)
{
return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
}
-static inline void set_ena_eth_io_intr_reg_rx_intr_delay(
- struct ena_eth_io_intr_reg *p, uint32_t val)
+static inline void set_ena_eth_io_intr_reg_rx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
{
p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
}
-static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(
- const struct ena_eth_io_intr_reg *p)
+static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(const struct ena_eth_io_intr_reg *p)
{
- return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK)
- >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK) >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
}
-static inline void set_ena_eth_io_intr_reg_tx_intr_delay(
- struct ena_eth_io_intr_reg *p, uint32_t val)
+static inline void set_ena_eth_io_intr_reg_tx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
{
- p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
- & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+ p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
}
-static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(
- const struct ena_eth_io_intr_reg *p)
+static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(const struct ena_eth_io_intr_reg *p)
{
- return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK)
- >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK) >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
}
-static inline void set_ena_eth_io_intr_reg_intr_unmask(
- struct ena_eth_io_intr_reg *p, uint32_t val)
+static inline void set_ena_eth_io_intr_reg_intr_unmask(struct ena_eth_io_intr_reg *p, uint32_t val)
{
- p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT)
- & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+ p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
-static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(
- const struct ena_eth_io_numa_node_cfg_reg *p)
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(const struct ena_eth_io_numa_node_cfg_reg *p)
{
return p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
}
-static inline void set_ena_eth_io_numa_node_cfg_reg_numa(
- struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+static inline void set_ena_eth_io_numa_node_cfg_reg_numa(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
{
p->numa_cfg |= val & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
}
-static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(
- const struct ena_eth_io_numa_node_cfg_reg *p)
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(const struct ena_eth_io_numa_node_cfg_reg *p)
{
- return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK)
- >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT;
+ return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK) >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT;
}
-static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(
- struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
{
- p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT)
- & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
+ p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT) & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
}
#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
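
The mss_hi_ptp accessors give way to mss_hi, matching how the driver uses the field: the TSO MSS is split, bits 9:0 in word2 (MSS_LO) and bits 13:10 in len_ctrl (MSS_HI), per the "bits 10-13 of the mss" comment in ena_eth_com.c further down. A hedged sketch of reassembling the value (demo helper; assumes ena_eth_io_defs.h is included and that this split is correct):

static inline uint32_t demo_tx_meta_mss(const struct ena_eth_io_tx_meta_desc *p)
{
	uint32_t lo = (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) >>
		      ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
	uint32_t hi = (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK) >>
		      ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT;

	/* hi carries mss bits 13:10, lo carries bits 9:0 */
	return (hi << 10) | lo;
}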
diff --git a/drivers/net/ena/base/ena_defs/ena_gen_info.h b/drivers/net/ena/base/ena_defs/ena_gen_info.h
index 3d252096..e87bcfd8 100644
--- a/drivers/net/ena/base/ena_defs/ena_gen_info.h
+++ b/drivers/net/ena/base/ena_defs/ena_gen_info.h
@@ -31,5 +31,5 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#define ENA_GEN_DATE "Sun Jun 5 10:24:39 IDT 2016"
-#define ENA_GEN_COMMIT "17146ed"
+#define ENA_GEN_DATE "Sun Oct 23 12:27:32 IDT 2016"
+#define ENA_GEN_COMMIT "79d82fa"
diff --git a/drivers/net/ena/base/ena_defs/ena_includes.h b/drivers/net/ena/base/ena_defs/ena_includes.h
index a86c876f..30a920a8 100644
--- a/drivers/net/ena/base/ena_defs/ena_includes.h
+++ b/drivers/net/ena/base/ena_defs/ena_includes.h
@@ -35,5 +35,3 @@
#include "ena_regs_defs.h"
#include "ena_admin_defs.h"
#include "ena_eth_io_defs.h"
-#include "ena_efa_admin_defs.h"
-#include "ena_efa_io_defs.h"
diff --git a/drivers/net/ena/base/ena_defs/ena_regs_defs.h b/drivers/net/ena/base/ena_defs/ena_regs_defs.h
index d0241278..b0870f25 100644
--- a/drivers/net/ena/base/ena_defs/ena_regs_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_regs_defs.h
@@ -34,6 +34,38 @@
#ifndef _ENA_REGS_H_
#define _ENA_REGS_H_
+enum ena_regs_reset_reason_types {
+ ENA_REGS_RESET_NORMAL = 0,
+
+ ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
+
+ ENA_REGS_RESET_ADMIN_TO = 2,
+
+ ENA_REGS_RESET_MISS_TX_CMPL = 3,
+
+ ENA_REGS_RESET_INV_RX_REQ_ID = 4,
+
+ ENA_REGS_RESET_INV_TX_REQ_ID = 5,
+
+ ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
+
+ ENA_REGS_RESET_INIT_ERR = 7,
+
+ ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
+
+ ENA_REGS_RESET_OS_TRIGGER = 9,
+
+ ENA_REGS_RESET_OS_NETDEV_WD = 10,
+
+ ENA_REGS_RESET_SHUTDOWN = 11,
+
+ ENA_REGS_RESET_USER_TRIGGER = 12,
+
+ ENA_REGS_RESET_GENERIC = 13,
+
+ ENA_REGS_RESET_MISS_INTERRUPT = 14,
+};
+
/* ena_registers offsets */
#define ENA_REGS_VERSION_OFF 0x0
#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
@@ -80,6 +112,8 @@
#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
/* aq_caps register */
#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
@@ -104,6 +138,8 @@
#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
+#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
/* dev_sts register */
#define ENA_REGS_DEV_STS_READY_MASK 0x1
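
The new reset_reason field occupies dev_ctl bits 31:28, so the driver can tell the device why it is requesting a reset. A sketch of composing the register value (demo helper; assumes ena_regs_defs.h is included and that the pre-existing ENA_REGS_DEV_CTL_DEV_RESET_MASK bit, which is not shown in this hunk, remains available):

#include <stdint.h>

static inline uint32_t demo_reset_cmd(enum ena_regs_reset_reason_types reason)
{
	uint32_t ctl = ENA_REGS_DEV_CTL_DEV_RESET_MASK;

	ctl |= ((uint32_t)reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
	       ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	return ctl;
}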
diff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c
index 290a5666..4c4989a3 100644
--- a/drivers/net/ena/base/ena_eth_com.c
+++ b/drivers/net/ena/base/ena_eth_com.c
@@ -43,11 +43,10 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
head_masked = io_cq->head & (io_cq->q_depth - 1);
expected_phase = io_cq->phase;
- cdesc = (struct ena_eth_io_rx_cdesc_base *)
- ((unsigned char *)io_cq->cdesc_addr.virt_addr
+ cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
@@ -74,7 +73,7 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
offset = tail_masked * io_sq->desc_entry_size;
- return (unsigned char *)io_sq->desc_addr.virt_addr + offset;
+ return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
@@ -86,8 +85,8 @@ static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
return;
- memcpy_toio((unsigned char *)io_sq->desc_addr.pbuf_dev_addr + offset,
- (unsigned char *)io_sq->desc_addr.virt_addr + offset,
+ memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
+ io_sq->desc_addr.virt_addr + offset,
io_sq->desc_entry_size);
}
@@ -125,11 +124,11 @@ static inline struct ena_eth_io_rx_cdesc_base *
{
idx &= (io_cq->q_depth - 1);
return (struct ena_eth_io_rx_cdesc_base *)
- ((unsigned char *)io_cq->cdesc_addr.virt_addr +
- idx * io_cq->cdesc_entry_size_in_bytes);
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ idx * io_cq->cdesc_entry_size_in_bytes);
}
-static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
u16 *first_cdesc_idx)
{
struct ena_eth_io_rx_cdesc_base *cdesc;
@@ -143,7 +142,7 @@ static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
count++;
- last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
@@ -183,9 +182,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
return false;
}
-static inline void ena_com_create_and_store_tx_meta_desc(
- struct ena_com_io_sq *io_sq,
- struct ena_com_tx_ctx *ena_tx_ctx)
+static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -203,8 +201,8 @@ static inline void ena_com_create_and_store_tx_meta_desc(
ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
/* bits 10-13 of the mss */
meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
- ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT) &
- ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK;
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
/* Extended meta desc */
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
@@ -237,11 +235,11 @@ static inline void ena_com_create_and_store_tx_meta_desc(
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
struct ena_eth_io_rx_cdesc_base *cdesc)
{
- ena_rx_ctx->l3_proto = (enum ena_eth_io_l3_proto_index)(cdesc->status &
- ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK);
- ena_rx_ctx->l4_proto = (enum ena_eth_io_l4_proto_index)
- ((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT);
+ ena_rx_ctx->l3_proto = cdesc->status &
+ ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+ ena_rx_ctx->l4_proto =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
ena_rx_ctx->l3_csum_err =
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
@@ -280,8 +278,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
bool have_meta;
u64 addr_hi;
- ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX,
- "wrong Q type");
+ ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
+ "wrong Q type");
/* num_bufs +1 for potential meta desc */
if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
@@ -410,8 +408,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
u16 nb_hw_desc;
u16 i;
- ENA_ASSERT(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX,
- "wrong Q type");
+ ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
if (nb_hw_desc == 0) {
@@ -455,8 +453,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
{
struct ena_eth_io_rx_desc *desc;
- ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX,
- "wrong Q type");
+ ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
return ENA_COM_NO_SPACE;
@@ -475,8 +473,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
- ((ena_buf->paddr &
- GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+ ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
ena_com_sq_update_tail(io_sq);
@@ -493,20 +490,37 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
expected_phase = io_cq->phase;
cdesc = (struct ena_eth_io_tx_cdesc *)
- ((unsigned char *)io_cq->cdesc_addr.virt_addr
- + (masked_head * io_cq->cdesc_entry_size_in_bytes));
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ (masked_head * io_cq->cdesc_entry_size_in_bytes));
/* When the current completion descriptor phase isn't the same as the
* expected, it mean that the device still didn't update
* this completion.
*/
- cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
if (cdesc_phase != expected_phase)
return ENA_COM_TRY_AGAIN;
+ if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
+ ena_trc_err("Invalid req id %d\n", cdesc->req_id);
+ return ENA_COM_INVAL;
+ }
+
ena_com_cq_inc_head(io_cq);
- *req_id = cdesc->req_id;
+ *req_id = READ_ONCE(cdesc->req_id);
return 0;
}
+
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+
+ cdesc = ena_com_get_next_rx_cdesc(io_cq);
+ if (cdesc)
+ return false;
+ else
+ return true;
+}
+
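
The READ_ONCE() annotations added above matter because completion descriptors are written by the device while the CPU polls them: the phase bit must be fetched exactly once, and only a matching phase makes the rest of the entry trustworthy. A sketch of the phase protocol behind ena_com_cq_inc_head() (demo helper, not the driver's code):

#include <stdint.h>

static void demo_cq_inc_head(uint16_t *head, uint8_t *expected_phase,
			     uint16_t q_depth)
{
	(*head)++;

	/* The device flips the phase bit it writes on every lap around the
	 * ring, so entries left over from the previous lap no longer match
	 * the expected phase and are treated as not yet completed.
	 */
	if (((*head) & (q_depth - 1)) == 0)
		*expected_phase ^= 1;
}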
diff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h
index 71a880c0..56ea4ae6 100644
--- a/drivers/net/ena/base/ena_eth_com.h
+++ b/drivers/net/ena/base/ena_eth_com.h
@@ -92,10 +92,12 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
struct ena_eth_io_intr_reg *intr_reg)
{
- ENA_REG_WRITE32(intr_reg->intr_control, io_cq->unmask_reg);
+ ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}
static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
@@ -118,7 +120,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
- ENA_REG_WRITE32(tail, io_sq->db_addr);
+ ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);
return 0;
}
@@ -135,7 +137,7 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
if (io_cq->cq_head_db_reg && need_update) {
ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
io_cq->qid, head);
- ENA_REG_WRITE32(head, io_cq->cq_head_db_reg);
+ ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
io_cq->last_head_update = head;
}
@@ -153,7 +155,7 @@ static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
- ENA_REG_WRITE32(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
+ ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
diff --git a/drivers/net/ena/base/ena_plat.h b/drivers/net/ena/base/ena_plat.h
index b5b64545..f829936b 100644
--- a/drivers/net/ena/base/ena_plat.h
+++ b/drivers/net/ena/base/ena_plat.h
@@ -43,7 +43,11 @@
#include "ena_plat_dpdk.h"
#endif
#elif defined(__FreeBSD__)
+#if defined(_KERNEL)
+#include "ena_plat_fbsd.h"
+#else
#include "ena_plat_dpdk.h"
+#endif
#elif defined(_WIN32)
#include "ena_plat_windows.h"
#else
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 93345199..900ba1a6 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -73,10 +73,10 @@ typedef uint64_t dma_addr_t;
#define ENA_COM_INVAL -EINVAL
#define ENA_COM_NO_SPACE -ENOSPC
#define ENA_COM_NO_DEVICE -ENODEV
-#define ENA_COM_PERMISSION -EPERM
#define ENA_COM_TIMER_EXPIRED -ETIME
#define ENA_COM_FAULT -EFAULT
#define ENA_COM_TRY_AGAIN -EAGAIN
+#define ENA_COM_UNSUPPORTED -EOPNOTSUPP
#define ____cacheline_aligned __rte_cache_aligned
@@ -116,11 +116,13 @@ typedef uint64_t dma_addr_t;
#define ENA_MIN16(x, y) RTE_MIN((x), (y))
#define ENA_MIN8(x, y) RTE_MIN((x), (y))
+#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
#define U64_C(x) x ## ULL
#define BIT(nr) (1UL << (nr))
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
#ifdef RTE_LIBRTE_ENA_COM_DEBUG
#define ena_trc_dbg(format, arg...) \
@@ -138,6 +140,15 @@ typedef uint64_t dma_addr_t;
#define ena_trc_err(format, arg...) do { } while (0)
#endif /* RTE_LIBRTE_ENA_COM_DEBUG */
+#define ENA_WARN(cond, format, arg...) \
+do { \
+ if (unlikely(cond)) { \
+ ena_trc_err( \
+ "Warn failed on %s:%s:%d:" format, \
+ __FILE__, __func__, __LINE__, ##arg); \
+ } \
+} while (0)
+
/* Spinlock related methods */
#define ena_spinlock_t rte_spinlock_t
#define ENA_SPINLOCK_INIT(spinlock) rte_spinlock_init(&spinlock)
@@ -177,10 +188,21 @@ typedef uint64_t dma_addr_t;
#define ENA_WAIT_EVENT_SIGNAL(waitevent) pthread_cond_signal(&waitevent.cond)
/* pthread condition doesn't need to be rearmed after usage */
#define ENA_WAIT_EVENT_CLEAR(...)
+#define ENA_WAIT_EVENT_DESTROY(waitqueue) ((void)(waitqueue))
#define ena_wait_event_t ena_wait_queue_t
#define ENA_MIGHT_SLEEP()
+#define ENA_TIME_EXPIRE(timeout) (timeout < rte_get_timer_cycles())
+#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \
+ (timeout_us * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles())
+
+/*
+ * Each rte_memzone should have a unique name.
+ * To satisfy this, count the number of allocations and append it to the name.
+ */
+extern uint32_t ena_alloc_cnt;
+
#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, handle) \
do { \
const struct rte_memzone *mz; \
@@ -190,17 +212,23 @@ typedef uint64_t dma_addr_t;
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, \
RTE_MEMZONE_IOVA_CONTIG); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
- phys = mz->iova; \
handle = mz; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->iova; \
+ } \
} while (0)
#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \
({ ENA_TOUCH(size); ENA_TOUCH(phys); \
ENA_TOUCH(dmadev); \
rte_memzone_free(handle); })
-#define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, node, dev_node) \
+#define ENA_MEM_ALLOC_COHERENT_NODE( \
+ dmadev, size, virt, phys, mem_handle, node, dev_node) \
do { \
const struct rte_memzone *mz; \
char z_name[RTE_MEMZONE_NAMESIZE]; \
@@ -209,29 +237,30 @@ typedef uint64_t dma_addr_t;
"ena_alloc_%d", ena_alloc_cnt++); \
mz = rte_memzone_reserve(z_name, size, node, \
RTE_MEMZONE_IOVA_CONTIG); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
- phys = mz->iova; \
+ mem_handle = mz; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->iova; \
+ } \
} while (0)
#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
do { \
- const struct rte_memzone *mz; \
- char z_name[RTE_MEMZONE_NAMESIZE]; \
ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
- snprintf(z_name, sizeof(z_name), \
- "ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, node, \
- RTE_MEMZONE_IOVA_CONTIG); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
+ virt = rte_zmalloc_socket(NULL, size, 0, node); \
} while (0)
#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
#define ENA_MEM_FREE(dmadev, ptr) ({ENA_TOUCH(dmadev); rte_free(ptr); })
-#define ENA_REG_WRITE32(value, reg) rte_write32_relaxed((value), (reg))
-#define ENA_REG_READ32(reg) rte_read32_relaxed((reg))
+#define ENA_REG_WRITE32(bus, value, reg) \
+ ({ (void)(bus); rte_write32_relaxed((value), (reg)); })
+#define ENA_REG_READ32(bus, reg) \
+ ({ (void)(bus); rte_read32_relaxed((reg)); })
#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr)
#define ATOMIC32_DEC(i32_ptr) rte_atomic32_dec(i32_ptr)
@@ -247,4 +276,11 @@ typedef uint64_t dma_addr_t;
#define PTR_ERR(error) ((long)(void *)error)
#define might_sleep()
+#define lower_32_bits(x) ((uint32_t)(x))
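+/* Shifting twice by 16 keeps upper_32_bits() well-defined even when (x)
+ * is a 32-bit type, where a single right shift by 32 would be undefined.
+ */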
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
+
+#ifndef READ_ONCE
+#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
+#endif
+
#endif /* DPDK_ENA_COM_ENA_PLAT_DPDK_H_ */
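The GENMASK_ULL rewrite above also fixes a latent problem: the old form (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) shifts by 64 for a full-width mask such as GENMASK_ULL(63, 0), which is undefined behavior in C. The new form never shifts by the full operand width. A small self-contained check of the new definition; the macro is copied from the hunk above, and main() is purely illustrative:

    #include <assert.h>

    #define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
    #define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
            (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

    int main(void)
    {
        /* Full 64-bit mask: the old definition would have computed
         * 1ULL << 64 here, which is undefined. */
        assert(GENMASK_ULL(63, 0) == ~0ULL);
        /* Bits 47..32, the kind of mask used for the high word of a
         * DMA address with 48 addressable bits. */
        assert(GENMASK_ULL(47, 32) == 0x0000FFFF00000000ULL);
        return 0;
    }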
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index c595cc7e..c255dc6d 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -54,7 +54,7 @@
#include <ena_eth_io_defs.h>
#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 0
+#define DRV_MODULE_VER_MINOR 1
#define DRV_MODULE_VER_SUBMINOR 0
#define ENA_IO_TXQ_IDX(q) (2 * (q))
@@ -85,6 +85,9 @@
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ENA_MAX_RING_DESC ENA_DEFAULT_RING_SIZE
+#define ENA_MIN_RING_DESC 128
+
enum ethtool_stringset {
ETH_SS_TEST = 0,
ETH_SS_STATS,
@@ -114,6 +117,12 @@ struct ena_stats {
#define ENA_STAT_GLOBAL_ENTRY(stat) \
ENA_STAT_ENTRY(stat, dev)
+/*
+ * Each rte_memzone should have a unique name.
+ * To satisfy this, count the number of allocations and append it to the name.
+ */
+uint32_t ena_alloc_cnt;
+
static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(tx_timeout),
ENA_STAT_GLOBAL_ENTRY(io_suspend),
@@ -195,8 +204,11 @@ static const struct rte_pci_id pci_id_ena_map[] = {
{ .device_id = 0 },
};
+static struct ena_aenq_handlers aenq_handlers;
+
static int ena_device_init(struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx);
+ struct ena_com_dev_get_features_ctx *get_feat_ctx,
+ bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -215,7 +227,9 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
+static void ena_stop(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
+static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
@@ -238,6 +252,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
+static void ena_interrupt_handler_rte(void *cb_arg);
+static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static const struct eth_dev_ops ena_dev_ops = {
.dev_configure = ena_dev_configure,
@@ -245,12 +261,14 @@ static const struct eth_dev_ops ena_dev_ops = {
.rx_queue_setup = ena_rx_queue_setup,
.tx_queue_setup = ena_tx_queue_setup,
.dev_start = ena_start,
+ .dev_stop = ena_stop,
.link_update = ena_link_update,
.stats_get = ena_stats_get,
.mtu_set = ena_mtu_set,
.rx_queue_release = ena_rx_queue_release,
.tx_queue_release = ena_tx_queue_release,
.dev_close = ena_close,
+ .dev_reset = ena_dev_reset,
.reta_update = ena_rss_reta_update,
.reta_query = ena_rss_reta_query,
};
@@ -346,9 +364,6 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
ena_meta->mss = mbuf->tso_segsz;
ena_meta->l3_hdr_len = mbuf->l3_len;
ena_meta->l3_hdr_offset = mbuf->l2_len;
- /* this param needed only for TSO */
- ena_meta->l3_outer_hdr_len = 0;
- ena_meta->l3_outer_hdr_offset = 0;
ena_tx_ctx->meta_valid = true;
} else {
@@ -356,6 +371,40 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
}
}
+static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
+{
+ if (likely(req_id < rx_ring->ring_size))
+ return 0;
+
+ RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id);
+
+ rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ rx_ring->adapter->trigger_reset = true;
+
+ return -EFAULT;
+}
+
+static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info = NULL;
+
+ if (likely(req_id < tx_ring->ring_size)) {
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->mbuf))
+ return 0;
+ }
+
+ if (tx_info)
+ RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n");
+ else
+ RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);
+
+ /* Trigger device reset */
+ tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+ tx_ring->adapter->trigger_reset = true;
+ return -EFAULT;
+}
+
static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
struct ena_admin_host_info *host_info;
@@ -387,9 +436,12 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
- RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
- if (rc != -EPERM)
- goto err;
+ if (rc == ENA_COM_UNSUPPORTED)
+ RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+ else
+ RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+ goto err;
}
return;
@@ -440,9 +492,12 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(&adapter->ena_dev);
if (rc) {
- RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
- if (rc != -EPERM)
- goto err;
+ if (rc == ENA_COM_UNSUPPORTED)
+ RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+ else
+ RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+ goto err;
}
return;
@@ -455,12 +510,76 @@ static void ena_close(struct rte_eth_dev *dev)
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
- adapter->state = ENA_ADAPTER_STATE_STOPPED;
+ ena_stop(dev);
+ adapter->state = ENA_ADAPTER_STATE_CLOSED;
ena_rx_queue_release_all(dev);
ena_tx_queue_release_all(dev);
}
+static int
+ena_dev_reset(struct rte_eth_dev *dev)
+{
+ struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES];
+ struct rte_eth_dev *eth_dev;
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+ struct ena_com_dev *ena_dev;
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ struct ena_adapter *adapter;
+ int nb_queues;
+ int rc, i;
+ bool wd_state;
+
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
+ ena_dev = &adapter->ena_dev;
+ eth_dev = adapter->rte_dev;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ intr_handle = &pci_dev->intr_handle;
+ nb_queues = eth_dev->data->nb_rx_queues;
+
+ ena_com_set_admin_running_state(ena_dev, false);
+
+ rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
+ if (rc)
+ RTE_LOG(ERR, PMD, "Device reset failed\n");
+
+ for (i = 0; i < nb_queues; i++)
+ mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;
+
+ ena_rx_queue_release_all(eth_dev);
+ ena_tx_queue_release_all(eth_dev);
+
+ rte_intr_disable(intr_handle);
+
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
+ if (rc) {
+ PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
+ return rc;
+ }
+ adapter->wd_state = wd_state;
+
+ rte_intr_enable(intr_handle);
+ ena_com_set_admin_polling_mode(ena_dev, false);
+ ena_com_admin_aenq_enable(ena_dev);
+
+ for (i = 0; i < nb_queues; ++i)
+ ena_rx_queue_setup(eth_dev, i, adapter->rx_ring_size, 0, NULL,
+ mb_pool_rx[i]);
+
+ for (i = 0; i < nb_queues; ++i)
+ ena_tx_queue_setup(eth_dev, i, adapter->tx_ring_size, 0, NULL);
+
+ adapter->trigger_reset = false;
+
+ return 0;
+}
+
static int ena_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
@@ -468,7 +587,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
struct ena_com_dev *ena_dev = &adapter->ena_dev;
- int ret, i;
+ int rc, i;
u16 entry_value;
int conf_idx;
int idx;
@@ -480,8 +599,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
RTE_LOG(WARNING, PMD,
"indirection table %d is bigger than supported (%d)\n",
reta_size, ENA_RX_RSS_TABLE_SIZE);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
for (i = 0 ; i < reta_size ; i++) {
@@ -493,29 +611,28 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
entry_value =
ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
- ret = ena_com_indirect_table_fill_entry(ena_dev,
- i,
- entry_value);
- if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+
+ rc = ena_com_indirect_table_fill_entry(ena_dev,
+ i,
+ entry_value);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
RTE_LOG(ERR, PMD,
"Cannot fill indirect table\n");
- ret = -ENOTSUP;
- goto err;
+ return rc;
}
}
}
- ret = ena_com_indirect_table_set(ena_dev);
- if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
- ret = -ENOTSUP;
- goto err;
+ return rc;
}
RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
__func__, reta_size, adapter->rte_dev->data->port_id);
-err:
- return ret;
+
+ return 0;
}
/* Query redirection table. */
@@ -526,7 +643,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
struct ena_com_dev *ena_dev = &adapter->ena_dev;
- int ret;
+ int rc;
int i;
u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
int reta_conf_idx;
@@ -536,11 +653,10 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
(reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
return -EINVAL;
- ret = ena_com_indirect_table_get(ena_dev, indirect_table);
- if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+ rc = ena_com_indirect_table_get(ena_dev, indirect_table);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
RTE_LOG(ERR, PMD, "cannot get indirect table\n");
- ret = -ENOTSUP;
- goto err;
+ return -ENOTSUP;
}
for (i = 0 ; i < reta_size ; i++) {
@@ -550,8 +666,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
reta_conf[reta_conf_idx].reta[reta_idx] =
ENA_IO_RXQ_IDX_REV(indirect_table[i]);
}
-err:
- return ret;
+
+ return 0;
}
static int ena_rss_init_default(struct ena_adapter *adapter)
@@ -571,7 +687,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
val = i % nb_rx_queues;
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
goto err_fill_indir;
}
@@ -579,19 +695,19 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
goto err_fill_indir;
}
rc = ena_com_set_default_hash_ctrl(ena_dev);
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
goto err_fill_indir;
}
rc = ena_com_indirect_table_set(ena_dev);
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
goto err_fill_indir;
}
@@ -650,6 +766,10 @@ static void ena_rx_queue_release(void *queue)
rte_free(ring->rx_buffer_info);
ring->rx_buffer_info = NULL;
+ if (ring->empty_rx_reqs)
+ rte_free(ring->empty_rx_reqs);
+ ring->empty_rx_reqs = NULL;
+
ring->configured = 0;
RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
@@ -723,9 +843,12 @@ static int ena_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
struct rte_eth_link *link = &dev->data->dev_link;
+ struct ena_adapter *adapter;
+
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
- link->link_status = ETH_LINK_UP;
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+ link->link_speed = ETH_SPEED_NUM_NONE;
link->link_duplex = ETH_LINK_FULL_DUPLEX;
return 0;
@@ -737,13 +860,18 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
struct ena_ring *queues = NULL;
+ int nb_queues;
int i = 0;
int rc = 0;
- queues = (ring_type == ENA_RING_TYPE_RX) ?
- adapter->rx_ring : adapter->tx_ring;
-
- for (i = 0; i < adapter->num_queues; i++) {
+ if (ring_type == ENA_RING_TYPE_RX) {
+ queues = adapter->rx_ring;
+ nb_queues = dev->data->nb_rx_queues;
+ } else {
+ queues = adapter->tx_ring;
+ nb_queues = dev->data->nb_tx_queues;
+ }
+ for (i = 0; i < nb_queues; i++) {
if (queues[i].configured) {
if (ring_type == ENA_RING_TYPE_RX) {
ena_assert_msg(
@@ -761,7 +889,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR,
"failed to restart queue %d type(%d)",
i, ring_type);
- return -1;
+ return rc;
}
}
}
@@ -785,9 +913,11 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
{
uint32_t max_frame_len = ena_get_mtu_conf(adapter);
- if (max_frame_len > adapter->max_mtu) {
- PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
- return -1;
+ if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
+ PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
+ "max mtu: %d, min mtu: %d\n",
+ max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
+ return ENA_COM_UNSUPPORTED;
}
return 0;
@@ -795,6 +925,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
static int
ena_calc_queue_size(struct ena_com_dev *ena_dev,
+ u16 *max_tx_sgl_size,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
@@ -812,11 +943,14 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev,
if (!rte_is_power_of_2(queue_size))
queue_size = rte_align32pow2(queue_size >> 1);
- if (queue_size == 0) {
+ if (unlikely(queue_size == 0)) {
PMD_INIT_LOG(ERR, "Invalid queue size");
return -EFAULT;
}
+ *max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+ get_feat_ctx->max_queues.max_packet_tx_descs);
+
return queue_size;
}
@@ -881,12 +1015,12 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
- if (mtu > ena_get_mtu_conf(adapter)) {
+ if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
RTE_LOG(ERR, PMD,
- "Given MTU (%d) exceeds maximum MTU supported (%d)\n",
- mtu, ena_get_mtu_conf(adapter));
- rc = -EINVAL;
- goto err;
+ "Invalid MTU setting. new_mtu: %d "
+ "max mtu: %d min mtu: %d\n",
+ mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
+ return -EINVAL;
}
rc = ena_com_set_dev_mtu(ena_dev, mtu);
@@ -895,7 +1029,6 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
else
RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);
-err:
return rc;
}
@@ -903,14 +1036,9 @@ static int ena_start(struct rte_eth_dev *dev)
{
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
+ uint64_t ticks;
int rc = 0;
- if (!(adapter->state == ENA_ADAPTER_STATE_CONFIG ||
- adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
- PMD_INIT_LOG(ERR, "API violation");
- return -1;
- }
-
rc = ena_check_valid_conf(adapter);
if (rc)
return rc;
@@ -924,7 +1052,7 @@ static int ena_start(struct rte_eth_dev *dev)
return rc;
if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
- ETH_MQ_RX_RSS_FLAG) {
+ ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
rc = ena_rss_init_default(adapter);
if (rc)
return rc;
@@ -932,11 +1060,28 @@ static int ena_start(struct rte_eth_dev *dev)
ena_stats_restart(dev);
+ adapter->timestamp_wd = rte_get_timer_cycles();
+ adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
+
+ ticks = rte_get_timer_hz();
+ rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
+ ena_timer_wd_callback, adapter);
+
adapter->state = ENA_ADAPTER_STATE_RUNNING;
return 0;
}
+static void ena_stop(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ rte_timer_stop_sync(&adapter->timer_wd);
+
+ adapter->state = ENA_ADAPTER_STATE_STOPPED;
+}
+
static int ena_queue_restart(struct ena_ring *ring)
{
int rc, bufs_num;
@@ -954,7 +1099,7 @@ static int ena_queue_restart(struct ena_ring *ring)
rc = ena_populate_rx_queue(ring, bufs_num);
if (rc != bufs_num) {
PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
- return (-1);
+ return ENA_COM_FAULT;
}
return 0;
@@ -984,12 +1129,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(CRIT, PMD,
"API violation. Queue %d is already configured\n",
queue_idx);
- return -1;
+ return ENA_COM_FAULT;
}
if (!rte_is_power_of_2(nb_desc)) {
RTE_LOG(ERR, PMD,
- "Unsupported size of RX queue: %d is not a power of 2.",
+ "Unsupported size of TX queue: %d is not a power of 2.",
nb_desc);
return -EINVAL;
}
@@ -1015,6 +1160,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(ERR, PMD,
"failed to create io TX queue #%d (qid:%d) rc: %d\n",
queue_idx, ena_qid, rc);
+ return rc;
}
txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
@@ -1026,10 +1172,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(ERR, PMD,
"Failed to get TX queue handlers. TX queue num %d rc: %d\n",
queue_idx, rc);
- ena_com_destroy_io_queue(ena_dev, ena_qid);
- goto err;
+ goto err_destroy_io_queue;
}
+ ena_com_update_numa_node(txq->ena_com_io_cq, ctx.numa_node);
+
txq->port_id = dev->data->port_id;
txq->next_to_clean = 0;
txq->next_to_use = 0;
@@ -1041,7 +1188,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!txq->tx_buffer_info) {
RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_destroy_io_queue;
}
txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
@@ -1049,18 +1197,29 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!txq->empty_tx_reqs) {
RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
- rte_free(txq->tx_buffer_info);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_free;
}
+
for (i = 0; i < txq->ring_size; i++)
txq->empty_tx_reqs[i] = i;
- txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ if (tx_conf != NULL) {
+ txq->offloads =
+ tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ }
/* Store pointer to this queue in upper layer */
txq->configured = 1;
dev->data->tx_queues[queue_idx] = txq;
-err:
+
+ return 0;
+
+err_free:
+ rte_free(txq->tx_buffer_info);
+
+err_destroy_io_queue:
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
return rc;
}
@@ -1079,7 +1238,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
(struct ena_adapter *)(dev->data->dev_private);
struct ena_ring *rxq = NULL;
uint16_t ena_qid = 0;
- int rc = 0;
+ int i, rc = 0;
struct ena_com_dev *ena_dev = &adapter->ena_dev;
rxq = &adapter->rx_ring[queue_idx];
@@ -1087,12 +1246,12 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(CRIT, PMD,
"API violation. Queue %d is already configured\n",
queue_idx);
- return -1;
+ return ENA_COM_FAULT;
}
if (!rte_is_power_of_2(nb_desc)) {
RTE_LOG(ERR, PMD,
- "Unsupported size of TX queue: %d is not a power of 2.",
+ "Unsupported size of RX queue: %d is not a power of 2.",
nb_desc);
return -EINVAL;
}
@@ -1114,9 +1273,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
ctx.numa_node = ena_cpu_to_node(queue_idx);
rc = ena_com_create_io_queue(ena_dev, &ctx);
- if (rc)
+ if (rc) {
RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
queue_idx, rc);
+ return rc;
+ }
rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
@@ -1129,6 +1290,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
queue_idx, rc);
ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
}
rxq->port_id = dev->data->port_id;
@@ -1142,9 +1304,24 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!rxq->rx_buffer_info) {
RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return -ENOMEM;
+ }
+
+ rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
+ sizeof(uint16_t) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->empty_rx_reqs) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
+ rte_free(rxq->rx_buffer_info);
+ rxq->rx_buffer_info = NULL;
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
return -ENOMEM;
}
+ for (i = 0; i < nb_desc; i++)
+ rxq->empty_rx_reqs[i] = i;
+
/* Store pointer to this queue in upper layer */
rxq->configured = 1;
dev->data->rx_queues[queue_idx] = rxq;
@@ -1159,7 +1336,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
uint16_t ring_size = rxq->ring_size;
uint16_t ring_mask = ring_size - 1;
uint16_t next_to_use = rxq->next_to_use;
- uint16_t in_use;
+ uint16_t in_use, req_id;
struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
if (unlikely(!count))
@@ -1187,12 +1364,18 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
struct ena_com_buf ebuf;
rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+
+ req_id = rxq->empty_rx_reqs[next_to_use_masked];
+ rc = validate_rx_req_id(rxq, req_id);
+ if (unlikely(rc < 0))
+ break;
+
/* prepare physical address for DMA transaction */
ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
/* pass resource to device */
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
- &ebuf, next_to_use_masked);
+ &ebuf, req_id);
if (unlikely(rc)) {
rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
count - i);
@@ -1202,9 +1385,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
next_to_use++;
}
+ if (unlikely(i < count))
+ RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
+ "buffers (from %d)\n", rxq->id, i, count);
+
/* When we submitted free resources to the device... */
- if (i > 0) {
- /* ...let HW know that it can fill buffers with data */
+ if (likely(i > 0)) {
+ /* ...let HW know that it can fill buffers with data
+ *
+ * Add a memory barrier to make sure the descriptors were written
+ * before issuing the doorbell
+ */
rte_wmb();
ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
@@ -1215,8 +1406,10 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
}
static int ena_device_init(struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+ struct ena_com_dev_get_features_ctx *get_feat_ctx,
+ bool *wd_state)
{
+ uint32_t aenq_groups;
int rc;
bool readless_supported;
@@ -1236,7 +1429,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
ena_com_set_mmio_read_mode(ena_dev, readless_supported);
/* reset device */
- rc = ena_com_dev_reset(ena_dev);
+ rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
if (rc) {
RTE_LOG(ERR, PMD, "cannot reset device\n");
goto err_mmio_read_less;
@@ -1252,7 +1445,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
/* ENA device administration layer init */
- rc = ena_com_admin_init(ena_dev, NULL, true);
+ rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
if (rc) {
RTE_LOG(ERR, PMD,
"cannot initialize ena admin queue with device\n");
@@ -1275,6 +1468,21 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
goto err_admin_init;
}
+ aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
+ BIT(ENA_ADMIN_NOTIFICATION) |
+ BIT(ENA_ADMIN_KEEP_ALIVE) |
+ BIT(ENA_ADMIN_FATAL_ERROR) |
+ BIT(ENA_ADMIN_WARNING);
+
+ aenq_groups &= get_feat_ctx->aenq.supported_groups;
+ rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc);
+ goto err_admin_init;
+ }
+
+ *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
+
return 0;
err_admin_init:
@@ -1286,16 +1494,89 @@ err_mmio_read_less:
return rc;
}
+static void ena_interrupt_handler_rte(void *cb_arg)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)cb_arg;
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+
+ ena_com_admin_q_comp_intr_handler(ena_dev);
+ if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
+ ena_com_aenq_intr_handler(ena_dev, adapter);
+}
+
+static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+{
+ if (!adapter->wd_state)
+ return;
+
+ if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
+ if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
+ adapter->keep_alive_timeout)) {
+ RTE_LOG(ERR, PMD, "Keep alive timeout\n");
+ adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
+ adapter->trigger_reset = true;
+ }
+}
+
+/* Check if admin queue is enabled */
+static void check_for_admin_com_state(struct ena_adapter *adapter)
+{
+ if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
+ RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n");
+ adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
+ adapter->trigger_reset = true;
+ }
+}
+
+static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
+ void *arg)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)arg;
+ struct rte_eth_dev *dev = adapter->rte_dev;
+
+ check_for_missing_keep_alive(adapter);
+ check_for_admin_com_state(adapter);
+
+ if (unlikely(adapter->trigger_reset)) {
+ RTE_LOG(ERR, PMD, "Trigger reset is on\n");
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ }
+}
+
+static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ int io_sq_num, io_cq_num, io_queue_num;
+
+ io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+ io_cq_num = get_feat_ctx->max_queues.max_cq_num;
+
+ io_queue_num = RTE_MIN(io_sq_num, io_cq_num);
+
+ if (unlikely(io_queue_num == 0)) {
+ RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n");
+ return -EFAULT;
+ }
+
+ return io_queue_num;
+}
+
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct ena_adapter *adapter =
(struct ena_adapter *)(eth_dev->data->dev_private);
struct ena_com_dev *ena_dev = &adapter->ena_dev;
struct ena_com_dev_get_features_ctx get_feat_ctx;
int queue_size, rc;
+ u16 tx_sgl_size = 0;
static int adapters_found;
+ bool wd_state;
memset(adapter, 0, sizeof(struct ena_adapter));
ena_dev = &adapter->ena_dev;
@@ -1319,19 +1600,16 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
pci_dev->addr.devid,
pci_dev->addr.function);
+ intr_handle = &pci_dev->intr_handle;
+
adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
- /* Present ENA_MEM_BAR indicates available LLQ mode.
- * Use corresponding policy
- */
- if (adapter->dev_mem_base)
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
- else if (adapter->regs)
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
- else
+ if (!adapter->regs) {
PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
ENA_REGS_BAR);
+ return -ENXIO;
+ }
ena_dev->reg_bar = adapter->regs;
ena_dev->dmadev = adapter->pdev;
@@ -1342,36 +1620,28 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
adapter->id_number);
/* device specific initialization routine */
- rc = ena_device_init(ena_dev, &get_feat_ctx);
+ rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
if (rc) {
PMD_INIT_LOG(CRIT, "Failed to init ENA device");
- return -1;
- }
-
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- if (get_feat_ctx.max_queues.max_llq_num == 0) {
- PMD_INIT_LOG(ERR,
- "Trying to use LLQ but llq_num is 0.\n"
- "Fall back into regular queues.");
- ena_dev->tx_mem_queue_type =
- ENA_ADMIN_PLACEMENT_POLICY_HOST;
- adapter->num_queues =
- get_feat_ctx.max_queues.max_sq_num;
- } else {
- adapter->num_queues =
- get_feat_ctx.max_queues.max_llq_num;
- }
- } else {
- adapter->num_queues = get_feat_ctx.max_queues.max_sq_num;
+ goto err;
}
+ adapter->wd_state = wd_state;
- queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx);
- if ((queue_size <= 0) || (adapter->num_queues <= 0))
- return -EFAULT;
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ adapter->num_queues = ena_calc_io_queue_num(ena_dev,
+ &get_feat_ctx);
+
+ queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx);
+ if (queue_size <= 0 || adapter->num_queues <= 0) {
+ rc = -EFAULT;
+ goto err_device_destroy;
+ }
adapter->tx_ring_size = queue_size;
adapter->rx_ring_size = queue_size;
+ adapter->max_tx_sgl_size = tx_sgl_size;
+
/* prepare ring structures */
ena_init_rings(adapter);
@@ -1394,13 +1664,66 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
RTE_CACHE_LINE_SIZE);
if (!adapter->drv_stats) {
RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_delete_debug_area;
}
+ rte_intr_callback_register(intr_handle,
+ ena_interrupt_handler_rte,
+ adapter);
+ rte_intr_enable(intr_handle);
+ ena_com_set_admin_polling_mode(ena_dev, false);
+ ena_com_admin_aenq_enable(ena_dev);
+
+ if (adapters_found == 0)
+ rte_timer_subsystem_init();
+ rte_timer_init(&adapter->timer_wd);
+
adapters_found++;
adapter->state = ENA_ADAPTER_STATE_INIT;
return 0;
+
+err_delete_debug_area:
+ ena_com_delete_debug_area(ena_dev);
+
+err_device_destroy:
+ ena_com_delete_host_info(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+
+err:
+ return rc;
+}
+
+static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(eth_dev->data->dev_private);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
+ ena_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ eth_dev->tx_pkt_prepare = NULL;
+
+ rte_free(adapter->drv_stats);
+ adapter->drv_stats = NULL;
+
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ena_interrupt_handler_rte,
+ adapter);
+
+ adapter->state = ENA_ADAPTER_STATE_FREE;
+
+ return 0;
}
static int ena_dev_configure(struct rte_eth_dev *dev)
@@ -1408,25 +1731,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
- if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
- adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
- PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
- adapter->state);
- return -1;
- }
-
- switch (adapter->state) {
- case ENA_ADAPTER_STATE_INIT:
- case ENA_ADAPTER_STATE_STOPPED:
- adapter->state = ENA_ADAPTER_STATE_CONFIG;
- break;
- case ENA_ADAPTER_STATE_CONFIG:
- RTE_LOG(WARNING, PMD,
- "Ivalid driver state while trying to configure device\n");
- break;
- default:
- break;
- }
+ adapter->state = ENA_ADAPTER_STATE_CONFIG;
adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
@@ -1446,6 +1751,7 @@ static void ena_init_rings(struct ena_adapter *adapter)
ring->id = i;
ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
+ ring->sgl_size = adapter->max_tx_sgl_size;
}
for (i = 0; i < adapter->num_queues; i++) {
@@ -1526,6 +1832,16 @@ static void ena_infos_get(struct rte_eth_dev *dev,
adapter->tx_supported_offloads = tx_feat;
adapter->rx_supported_offloads = rx_feat;
+
+ dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+ dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+
+ dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+ dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+ dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+ feat.max_queues.max_packet_tx_descs);
+ dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+ feat.max_queues.max_packet_tx_descs);
}
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1536,6 +1852,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
unsigned int ring_mask = ring_size - 1;
uint16_t next_to_clean = rx_ring->next_to_clean;
uint16_t desc_in_use = 0;
+ uint16_t req_id;
unsigned int recv_idx = 0;
struct rte_mbuf *mbuf = NULL;
struct rte_mbuf *mbuf_head = NULL;
@@ -1569,6 +1886,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
&ena_rx_ctx);
if (unlikely(rc)) {
RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
+ rx_ring->adapter->trigger_reset = true;
return 0;
}
@@ -1576,12 +1894,17 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
break;
while (segments < ena_rx_ctx.descs) {
- mbuf = rx_buff_info[next_to_clean & ring_mask];
+ req_id = ena_rx_ctx.ena_bufs[segments].req_id;
+ rc = validate_rx_req_id(rx_ring, req_id);
+ if (unlikely(rc))
+ break;
+
+ mbuf = rx_buff_info[req_id];
mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->refcnt = 1;
mbuf->next = NULL;
- if (segments == 0) {
+ if (unlikely(segments == 0)) {
mbuf->nb_segs = ena_rx_ctx.descs;
mbuf->port = rx_ring->port_id;
mbuf->pkt_len = 0;
@@ -1593,6 +1916,8 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
mbuf_head->pkt_len += mbuf->data_len;
mbuf_prev = mbuf;
+ rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
+ req_id;
segments++;
next_to_clean++;
}
@@ -1686,6 +2011,46 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+static void ena_update_hints(struct ena_adapter *adapter,
+ struct ena_admin_ena_hw_hints *hints)
+{
+ if (hints->admin_completion_tx_timeout)
+ adapter->ena_dev.admin_queue.completion_timeout =
+ hints->admin_completion_tx_timeout * 1000;
+
+ if (hints->mmio_read_timeout)
+ /* convert to usec */
+ adapter->ena_dev.mmio_read.reg_read_to =
+ hints->mmio_read_timeout * 1000;
+
+ if (hints->driver_watchdog_timeout) {
+ if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
+ else
+ /* Convert msecs to ticks */
+ adapter->keep_alive_timeout =
+ (hints->driver_watchdog_timeout *
+ rte_get_timer_hz()) / 1000;
+ }
+}
+
+static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
+ struct rte_mbuf *mbuf)
+{
+ int num_segments, rc;
+
+ num_segments = mbuf->nb_segs;
+
+ if (likely(num_segments < tx_ring->sgl_size))
+ return 0;
+
+ rc = rte_pktmbuf_linearize(mbuf);
+ if (unlikely(rc))
+ RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");
+
+ return rc;
+}
+
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -1716,6 +2081,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
mbuf = tx_pkts[sent_idx];
+ rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
+ if (unlikely(rc))
+ break;
+
req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
tx_info = &tx_ring->tx_buffer_info[req_id];
tx_info->mbuf = mbuf;
@@ -1793,6 +2162,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Clear complete packets */
while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
+ rc = validate_tx_req_id(tx_ring, req_id);
+ if (rc)
+ break;
+
/* Get Tx info & store how many descs were processed */
tx_info = &tx_ring->tx_buffer_info[req_id];
total_tx_descs += tx_info->tx_descs;
@@ -1820,6 +2193,9 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return sent_idx;
}
+/*********************************************************************
+ * PMD configuration
+ *********************************************************************/
static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
@@ -1829,12 +2205,13 @@ static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
}
static struct rte_pci_driver rte_ena_pmd = {
.id_table = pci_id_ena_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_WC_ACTIVATE,
.probe = eth_ena_pci_probe,
.remove = eth_ena_pci_remove,
};
@@ -1843,9 +2220,7 @@ RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(ena_init_log);
-static void
-ena_init_log(void)
+RTE_INIT(ena_init_log)
{
ena_logtype_init = rte_log_register("pmd.net.ena.init");
if (ena_logtype_init >= 0)
@@ -1854,3 +2229,75 @@ ena_init_log(void)
if (ena_logtype_driver >= 0)
rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
}
+
+/******************************************************************************
+ ******************************** AENQ Handlers *******************************
+ *****************************************************************************/
+static void ena_update_on_link_change(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct rte_eth_dev *eth_dev;
+ struct ena_adapter *adapter;
+ struct ena_admin_aenq_link_change_desc *aenq_link_desc;
+ uint32_t status;
+
+ adapter = (struct ena_adapter *)adapter_data;
+ aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
+ eth_dev = adapter->rte_dev;
+
+ status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
+ adapter->link_status = status;
+
+ ena_link_update(eth_dev, 0);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
+static void ena_notification(void *data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)data;
+ struct ena_admin_ena_hw_hints *hints;
+
+ if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
+ RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n",
+ aenq_e->aenq_common_desc.group,
+ ENA_ADMIN_NOTIFICATION);
+
+ switch (aenq_e->aenq_common_desc.syndrom) {
+ case ENA_ADMIN_UPDATE_HINTS:
+ hints = (struct ena_admin_ena_hw_hints *)
+ (&aenq_e->inline_data_w4);
+ ena_update_hints(adapter, hints);
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Invalid aenq notification link state %d\n",
+ aenq_e->aenq_common_desc.syndrom);
+ }
+}
+
+static void ena_keep_alive(void *adapter_data,
+ __rte_unused struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+
+ adapter->timestamp_wd = rte_get_timer_cycles();
+}
+
+/**
+ * This handler will be called for any unknown event group or
+ * unimplemented handler.
+ */
+static void unimplemented_aenq_handler(__rte_unused void *data,
+ __rte_unused struct ena_admin_aenq_entry *aenq_e)
+{
+ RTE_LOG(ERR, PMD, "Unknown event was received or event with "
+ "unimplemented handler\n");
+}
+
+static struct ena_aenq_handlers aenq_handlers = {
+ .handlers = {
+ [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
+ [ENA_ADMIN_NOTIFICATION] = ena_notification,
+ [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
+ },
+ .unimplemented_handler = unimplemented_aenq_handler
+};
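The aenq_handlers table above is consumed by the base code's AENQ interrupt path, which selects a handler by the event's group index and falls back to unimplemented_handler for anything unregistered. A hedged sketch of that dispatch shape; the type names and table size here are illustrative stand-ins, not the ena_com definitions:

    #include <stddef.h>
    #include <stdint.h>

    struct aenq_entry;  /* stands in for struct ena_admin_aenq_entry */
    typedef void (*aenq_handler)(void *data, struct aenq_entry *e);

    #define AENQ_GROUPS 32  /* illustrative table size */

    struct aenq_handlers_tbl {
        aenq_handler handlers[AENQ_GROUPS];
        aenq_handler unimplemented_handler;
    };

    /* Pick the handler for an event group, falling back to the
     * catch-all when the group is out of range or unregistered. */
    static aenq_handler aenq_get_handler(const struct aenq_handlers_tbl *h,
                                         uint16_t group)
    {
        if (group >= AENQ_GROUPS || h->handlers[group] == NULL)
            return h->unimplemented_handler;
        return h->handlers[group];
    }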
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 394d05e0..2dc8129e 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -34,8 +34,10 @@
#ifndef _ENA_ETHDEV_H_
#define _ENA_ETHDEV_H_
+#include <rte_cycles.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
+#include <rte_timer.h>
#include "ena_com.h"
@@ -48,8 +50,13 @@
#define ENA_NAME_MAX_LEN 20
#define ENA_PKT_MAX_BUFS 17
+#define ENA_MIN_MTU 128
+
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+#define ENA_WD_TIMEOUT_SEC 3
+#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
+
struct ena_adapter;
enum ena_ring_type {
@@ -70,8 +77,12 @@ struct ena_ring {
enum ena_ring_type type;
enum ena_admin_placement_policy_type tx_mem_queue_type;
- /* Holds the empty requests for TX OOO completions */
- uint16_t *empty_tx_reqs;
+ /* Holds the empty requests for TX/RX OOO completions */
+ union {
+ uint16_t *empty_tx_reqs;
+ uint16_t *empty_rx_reqs;
+ };
+
union {
struct ena_tx_buffer *tx_buffer_info; /* context of tx packet */
struct rte_mbuf **rx_buffer_info; /* context of rx packet */
@@ -92,14 +103,16 @@ struct ena_ring {
int configured;
struct ena_adapter *adapter;
uint64_t offloads;
+ u16 sgl_size;
} __rte_cache_aligned;
enum ena_adapter_state {
ENA_ADAPTER_STATE_FREE = 0,
ENA_ADAPTER_STATE_INIT = 1,
- ENA_ADAPTER_STATE_RUNNING = 2,
+ ENA_ADAPTER_STATE_RUNNING = 2,
ENA_ADAPTER_STATE_STOPPED = 3,
ENA_ADAPTER_STATE_CONFIG = 4,
+ ENA_ADAPTER_STATE_CLOSED = 5,
};
struct ena_driver_stats {
@@ -157,6 +170,7 @@ struct ena_adapter {
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
int tx_ring_size;
+ u16 max_tx_sgl_size;
/* RX */
struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
@@ -180,6 +194,18 @@ struct ena_adapter {
uint64_t tx_selected_offloads;
uint64_t rx_supported_offloads;
uint64_t rx_selected_offloads;
+
+ bool link_status;
+
+ enum ena_regs_reset_reason_types reset_reason;
+
+ struct rte_timer timer_wd;
+ uint64_t timestamp_wd;
+ uint64_t keep_alive_timeout;
+
+ bool trigger_reset;
+
+ bool wd_state;
};
#endif /* _ENA_ETHDEV_H_ */
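The watchdog fields added to struct ena_adapter above all hold timer cycles: ena_keep_alive() refreshes timestamp_wd on every keep-alive event, ena_timer_wd_callback() flags a reset once the gap exceeds keep_alive_timeout, and hardware hints arriving in milliseconds are converted as in ena_update_hints(). A minimal sketch of the two calculations, assuming a monotonic cycle counter in the style of rte_get_timer_cycles():

    #include <stdbool.h>
    #include <stdint.h>

    /* Convert a hint given in milliseconds to timer cycles. */
    static uint64_t ms_to_cycles(uint64_t ms, uint64_t timer_hz)
    {
        return ms * timer_hz / 1000;
    }

    /* True when the device has been silent longer than the allowed gap.
     * Unsigned subtraction stays correct across counter wraparound. */
    static bool keep_alive_expired(uint64_t now, uint64_t last_stamp,
                                   uint64_t timeout_cycles)
    {
        return (now - last_stamp) >= timeout_cycles;
    }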
diff --git a/drivers/net/ena/meson.build b/drivers/net/ena/meson.build
new file mode 100644
index 00000000..091ca6e3
--- /dev/null
+++ b/drivers/net/ena/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+sources = files('ena_ethdev.c',
+ 'base/ena_com.c',
+ 'base/ena_eth_com.c')
+
+deps += ['timer']
+
+includes += include_directories('base', 'base/ena_defs')
diff --git a/drivers/net/enic/base/cq_desc.h b/drivers/net/enic/base/cq_desc.h
index 7e138027..ae8847c6 100644
--- a/drivers/net/enic/base/cq_desc.h
+++ b/drivers/net/enic/base/cq_desc.h
@@ -38,6 +38,7 @@ struct cq_desc {
#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK 1
#define CQ_DESC_COLOR_SHIFT 7
+#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80
#define CQ_DESC_Q_NUM_BITS 10
#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS 12
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 8483f76f..16e8814a 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -528,6 +528,22 @@ parse_max_level:
return 0;
}
+void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
+ bool *weak)
+{
+ u64 a0 = CMD_NIC_CFG, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *cfg_chk = false;
+ *weak = false;
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (err == 0 && a0 != 0 && a1 != 0) {
+ *cfg_chk = true;
+ *weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
+ }
+}
+
int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
u64 a0 = (u32)cmd, a1 = 0;
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 3c908430..270a47bd 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -6,6 +6,8 @@
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
+#include <stdbool.h>
+
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -109,6 +111,8 @@ int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd);
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
u8 *filter_actions);
+void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
+ bool *weak);
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index 2865eb4d..a22d8a76 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -138,9 +138,27 @@ enum vnic_devcmd_cmd {
/* del VLAN id in (u16)a0 */
CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
- /* nic_cfg in (u32)a0 */
+ /*
+ * nic_cfg in (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if a1 is valid
+ * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * (flags are CMD_NIC_CFG_CAPF_xxx)
+ */
CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+ /*
+ * nic_cfg_chk (same as nic_cfg, but may return error)
+ * in (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if a1 is valid
+ * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * (flags are CMD_NIC_CFG_CAPF_xxx)
+ */
+ CMD_NIC_CFG_CHK = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
@@ -605,6 +623,9 @@ enum filter_cap_mode {
/* flags for CMD_INIT */
#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
+/* flags for CMD_NIC_CFG */
+#define CMD_NIC_CFG_CAPF_UDP_WEAK (1ULL << 0) /* Bodega-style UDP RSS */
+
/* flags for CMD_PACKET_FILTER */
#define CMD_PFILTER_DIRECTED 0x01
#define CMD_PFILTER_MULTICAST 0x02
diff --git a/drivers/net/enic/base/vnic_enet.h b/drivers/net/enic/base/vnic_enet.h
index 49504a7d..901f3b46 100644
--- a/drivers/net/enic/base/vnic_enet.h
+++ b/drivers/net/enic/base/vnic_enet.h
@@ -53,9 +53,8 @@ struct vnic_enet_config {
#define VENETF_NVGRE 0x20000 /* NVGRE offload */
#define VENETF_GRPINTR 0x40000 /* group interrupt */
#define VENETF_NICSWITCH 0x80000 /* NICSWITCH enabled */
-#define VENETF_RSSHASH_UDP_WEAK 0x100000 /* VIC has Bodega-style UDP RSS */
-#define VENETF_RSSHASH_UDPIPV4 0x200000 /* Hash on UDP + IPv4 fields */
-#define VENETF_RSSHASH_UDPIPV6 0x400000 /* Hash on UDP + IPv6 fields */
+#define VENETF_RSSHASH_UDPIPV4 0x100000 /* Hash on UDP + IPv4 fields */
+#define VENETF_RSSHASH_UDPIPV6 0x200000 /* Hash on UDP + IPv6 fields */
#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */
#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */
diff --git a/drivers/net/enic/base/vnic_nic.h b/drivers/net/enic/base/vnic_nic.h
index e318d0cb..16040852 100644
--- a/drivers/net/enic/base/vnic_nic.h
+++ b/drivers/net/enic/base/vnic_nic.h
@@ -32,8 +32,8 @@
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
-#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
+#define NIC_CFG_RSS_HASH_TYPE_RSVD1 (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_RSVD2 (1 << 6)
#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7)
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index 9619290d..d8e67f74 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -52,6 +52,8 @@ struct vnic_rq {
struct vnic_dev *vdev;
struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
+ struct rte_mbuf **free_mbufs; /* reserve of free mbufs */
+ int num_free_mbufs;
struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
unsigned int mbuf_next_idx; /* next mb to consume */
void *os_buf_head;
diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
index d61c4c6e..c9bf3572 100644
--- a/drivers/net/enic/base/vnic_wq.c
+++ b/drivers/net/enic/base/vnic_wq.c
@@ -32,8 +32,8 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
unsigned int count = wq->ring.desc_count;
/* Allocate the mbuf ring */
- wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs",
- sizeof(struct vnic_wq_buf) * count,
+ wq->bufs = (struct rte_mbuf **)rte_zmalloc_socket("wq->bufs",
+ sizeof(struct rte_mbuf *) * count,
RTE_CACHE_LINE_SIZE, wq->socket_id);
wq->head_idx = 0;
wq->tail_idx = 0;
@@ -113,6 +113,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
vnic_wq_init_start(wq, cq_index, 0, 0,
error_interrupt_enable,
error_interrupt_offset);
+ wq->cq_pend = 0;
wq->last_completed_index = 0;
}
@@ -145,9 +146,9 @@ int vnic_wq_disable(struct vnic_wq *wq)
}
void vnic_wq_clean(struct vnic_wq *wq,
- void (*buf_clean)(struct vnic_wq_buf *buf))
+ void (*buf_clean)(struct rte_mbuf **buf))
{
- struct vnic_wq_buf *buf;
+ struct rte_mbuf **buf;
unsigned int to_clean = wq->tail_idx;
buf = &wq->bufs[to_clean];
diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
index 0135bffc..236cf696 100644
--- a/drivers/net/enic/base/vnic_wq.h
+++ b/drivers/net/enic/base/vnic_wq.h
@@ -36,24 +36,20 @@ struct vnic_wq_ctrl {
u32 pad9;
};
-/* 16 bytes */
-struct vnic_wq_buf {
- struct rte_mempool *pool;
- void *mb;
-};
-
struct vnic_wq {
unsigned int index;
uint64_t tx_offload_notsup_mask;
struct vnic_dev *vdev;
struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
- struct vnic_wq_buf *bufs;
+ struct rte_mbuf **bufs;
unsigned int head_idx;
+ unsigned int cq_pend;
unsigned int tail_idx;
unsigned int socket_id;
const struct rte_memzone *cqmsg_rz;
uint16_t last_completed_index;
+ uint64_t offloads;
};
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
@@ -164,5 +160,5 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
- void (*buf_clean)(struct vnic_wq_buf *buf));
+ void (*buf_clean)(struct rte_mbuf **buf));
#endif /* _VNIC_WQ_H_ */
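Dropping struct vnic_wq_buf in favor of a bare rte_mbuf * array above removes a per-descriptor indirection: completion cleanup can free mbufs straight out of bufs[]. A hypothetical walk under the new layout, assuming power-of-two ring sizing; this is a sketch of the shape the change enables, not the PMD's vnic_wq_clean():

    #include <rte_mbuf.h>

    /* Free the `count` oldest transmitted mbufs, starting at tail_idx.
     * With bufs[] holding rte_mbuf pointers directly, no intermediate
     * struct has to be dereferenced per descriptor. */
    static void wq_clean_completed(struct rte_mbuf **bufs,
                                   unsigned int tail_idx,
                                   unsigned int count,
                                   unsigned int desc_mask)
    {
        unsigned int i, idx;

        for (i = 0; i < count; i++) {
            idx = (tail_idx + i) & desc_mask;
            rte_pktmbuf_free(bufs[idx]);
            bufs[idx] = NULL;
        }
    }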
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index ee83fe57..7c27bd51 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -50,6 +50,9 @@
#define ENICPMD_FDIR_MAX 64
+/* HW default VXLAN port */
+#define ENIC_DEFAULT_VXLAN_PORT 4789
+
/*
* Interrupt 0: LSC and errors
* Interrupt 1: rx queue 0
@@ -123,6 +126,10 @@ struct enic {
u8 filter_actions; /* HW supported actions */
bool vxlan;
bool disable_overlay; /* devargs disable_overlay=1 */
+ bool nic_cfg_chk; /* NIC_CFG_CHK available */
+ bool udp_rss_weak; /* Bodega style UDP RSS */
+ uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
+ uint16_t vxlan_port; /* current vxlan port pushed to NIC */
unsigned int flags;
unsigned int priv_flags;
@@ -176,6 +183,7 @@ struct enic {
uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
uint64_t tx_offload_mask; /* PKT_TX flags accepted */
};
@@ -305,11 +313,15 @@ int enic_clsf_init(struct enic *enic);
void enic_clsf_destroy(struct enic *enic);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t enic_dummy_recv_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
diff --git a/drivers/net/enic/enic_compat.h b/drivers/net/enic/enic_compat.h
index c0af1ed2..ceb1b096 100644
--- a/drivers/net/enic/enic_compat.h
+++ b/drivers/net/enic/enic_compat.h
@@ -56,6 +56,11 @@
#define dev_debug(x, args...) dev_printk(DEBUG, args)
extern int enicpmd_logtype_flow;
+extern int enicpmd_logtype_init;
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
+ "%s" fmt "\n", __func__, ##args)
#define __le16 u16
#define __le32 u32
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 28630892..b3d57771 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -24,10 +24,6 @@
int enicpmd_logtype_init;
int enicpmd_logtype_flow;
-#define PMD_INIT_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
- "%s" fmt "\n", __func__, ##args)
-
#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
/*
@@ -41,10 +37,9 @@ static const struct rte_pci_id pci_id_enic_map[] = {
};
#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
+#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
-RTE_INIT(enicpmd_init_log);
-static void
-enicpmd_init_log(void)
+RTE_INIT(enicpmd_init_log)
{
enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
if (enicpmd_logtype_init >= 0)
@@ -184,17 +179,21 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __rte_unused const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf)
{
int ret;
struct enic *enic = pmd_priv(eth_dev);
+ struct vnic_wq *wq;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -E_RTE_SECONDARY;
ENICPMD_FUNC_TRACE();
RTE_ASSERT(queue_idx < enic->conf_wq_count);
- eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
+ wq = &enic->wq[queue_idx];
+ wq->offloads = tx_conf->offloads |
+ eth_dev->data->dev_conf.txmode.offloads;
+ eth_dev->data->tx_queues[queue_idx] = (void *)wq;
ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
if (ret) {
@@ -476,12 +475,37 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
device_info->rx_offload_capa = enic->rx_offload_capa;
device_info->tx_offload_capa = enic->tx_offload_capa;
+ device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
device_info->reta_size = enic->reta_size;
device_info->hash_key_size = enic->hash_key_size;
device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
+ device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = enic->config.rq_desc_count,
+ .nb_min = ENIC_MIN_RQ_DESCS,
+ .nb_align = ENIC_ALIGN_DESCS,
+ };
+ device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = enic->config.wq_desc_count,
+ .nb_min = ENIC_MIN_WQ_DESCS,
+ .nb_align = ENIC_ALIGN_DESCS,
+ .nb_seg_max = ENIC_TX_XMIT_MAX,
+ .nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
+ };
+ device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
+ .burst_size = ENIC_DEFAULT_RX_BURST,
+ .ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
+ ENIC_DEFAULT_RX_RING_SIZE),
+ .nb_queues = ENIC_DEFAULT_RX_RINGS,
+ };
+ device_info->default_txportconf = (struct rte_eth_dev_portconf) {
+ .burst_size = ENIC_DEFAULT_TX_BURST,
+ .ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
+ ENIC_DEFAULT_TX_RING_SIZE),
+ .nb_queues = ENIC_DEFAULT_TX_RINGS,
+ };
}
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
@@ -498,7 +522,8 @@ static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == enic_recv_pkts)
+ if (dev->rx_pkt_burst == enic_recv_pkts ||
+ dev->rx_pkt_burst == enic_noscatter_recv_pkts)
return ptypes;
return NULL;
}
@@ -731,15 +756,16 @@ static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
}
static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
- __rte_unused uint16_t tx_queue_id,
+ uint16_t tx_queue_id,
struct rte_eth_txq_info *qinfo)
{
struct enic *enic = pmd_priv(dev);
+ struct vnic_wq *wq = &enic->wq[tx_queue_id];
ENICPMD_FUNC_TRACE();
- qinfo->nb_desc = enic->config.wq_desc_count;
+ qinfo->nb_desc = wq->ring.desc_count;
memset(&qinfo->conf, 0, sizeof(qinfo->conf));
- qinfo->conf.offloads = enic->tx_offload_capa;
+ qinfo->conf.offloads = wq->offloads;
/* tx_thresh and all the other fields are not applicable for enic */
}
@@ -763,6 +789,79 @@ static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
return 0;
}
+static int udp_tunnel_common_check(struct enic *enic,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
+ return -ENOTSUP;
+ if (!enic->overlay_offload) {
+ PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not "
+ "supported\n");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int update_vxlan_port(struct enic *enic, uint16_t port)
+{
+ if (vnic_dev_overlay_offload_cfg(enic->vdev,
+ OVERLAY_CFG_VXLAN_PORT_UPDATE,
+ port)) {
+ PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n");
+ return -EINVAL;
+ }
+ PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port);
+ enic->vxlan_port = port;
+ return 0;
+}
+
+static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ ret = udp_tunnel_common_check(enic, tnl);
+ if (ret)
+ return ret;
+ /*
+ * The NIC has 1 configurable VXLAN port number. "Adding" a new port
+ * number replaces it.
+ */
+ if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
+ PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n",
+ tnl->udp_port);
+ return -EINVAL;
+ }
+ return update_vxlan_port(enic, tnl->udp_port);
+}
+
+static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ ret = udp_tunnel_common_check(enic, tnl);
+ if (ret)
+ return ret;
+ /*
+ * Clear the previously set port number and restore the
+ * hardware default port number. Some drivers disable VXLAN
+ * offloads when there are no configured port numbers. But
+ * enic does not do that as VXLAN is part of overlay offload,
+ * which is tied to inner RSS and TSO.
+ */
+ if (tnl->udp_port != enic->vxlan_port) {
+ PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n",
+ tnl->udp_port);
+ return -EINVAL;
+ }
+ return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
+}
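For context, a minimal caller-side sketch (not part of the patch; port_id and the 8472 port are placeholders) of how an application reaches these new ops through the generic ethdev API. On enic, "adding" a port simply replaces the single configurable one, and deleting it restores the 4789 default:

	#include <rte_ethdev.h>

	static int use_legacy_vxlan_port(uint16_t port_id)
	{
		struct rte_eth_udp_tunnel tnl = {
			.udp_port = 8472,	/* e.g. the legacy Linux default */
			.prot_type = RTE_TUNNEL_TYPE_VXLAN,
		};

		/* Dispatches to enicpmd_dev_udp_tunnel_port_add() above */
		return rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
	}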
+
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.dev_configure = enicpmd_dev_configure,
.dev_start = enicpmd_dev_start,
@@ -812,6 +911,8 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.reta_update = enicpmd_dev_rss_reta_update,
.rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
.rss_hash_update = enicpmd_dev_rss_hash_update,
+ .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
};
static int enic_parse_disable_overlay(__rte_unused const char *key,
@@ -833,23 +934,61 @@ static int enic_parse_disable_overlay(__rte_unused const char *key,
return 0;
}
+static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct enic *enic;
+
+ enic = (struct enic *)opaque;
+ if (strcmp(value, "trunk") == 0) {
+ /* Trunk mode: always tag */
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
+ } else if (strcmp(value, "untag") == 0) {
+ /* Untag default VLAN mode: untag if VLAN = default VLAN */
+ enic->ig_vlan_rewrite_mode =
+ IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
+ } else if (strcmp(value, "priority") == 0) {
+ /*
+ * Priority-tag default VLAN mode: priority tag (VLAN header
+ * with ID=0) if VLAN = default
+ */
+ enic->ig_vlan_rewrite_mode =
+ IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
+ } else if (strcmp(value, "pass") == 0) {
+ /* Pass through mode: do not touch tags */
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
+ } else {
+ dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
+ ": expected=trunk|untag|priority|pass given=%s\n",
+ value);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int enic_check_devargs(struct rte_eth_dev *dev)
{
static const char *const valid_keys[] = {
- ENIC_DEVARG_DISABLE_OVERLAY, NULL};
+ ENIC_DEVARG_DISABLE_OVERLAY,
+ ENIC_DEVARG_IG_VLAN_REWRITE,
+ NULL};
struct enic *enic = pmd_priv(dev);
struct rte_kvargs *kvlist;
ENICPMD_FUNC_TRACE();
enic->disable_overlay = false;
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
if (!dev->device->devargs)
return 0;
kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
if (!kvlist)
return -EINVAL;
if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
- enic_parse_disable_overlay, enic) < 0) {
+ enic_parse_disable_overlay, enic) < 0 ||
+ rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
+ enic_parse_ig_vlan_rewrite, enic) < 0) {
rte_kvargs_free(kvlist);
return -EINVAL;
}
@@ -914,4 +1053,5 @@ RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
- ENIC_DEVARG_DISABLE_OVERLAY "=<0|1> ");
+ ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
+ ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
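As a usage illustration (hypothetical PCI address), the new devarg is passed on the command line alongside the existing one:

	testpmd -w 0000:0b:00.0,disable-overlay=0,ig-vlan-rewrite=untag -- -i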
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a25d303d..fd940c58 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -69,12 +69,12 @@ enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
}
}
-static void enic_free_wq_buf(struct vnic_wq_buf *buf)
+static void enic_free_wq_buf(struct rte_mbuf **buf)
{
- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
+ struct rte_mbuf *mbuf = *buf;
rte_pktmbuf_free_seg(mbuf);
- buf->mb = NULL;
+ *buf = NULL;
}
static void enic_log_q_error(struct enic *enic)
@@ -320,6 +320,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
* enic_start_rq().
*/
rq->need_initial_post = true;
+ /* Initialize fetch index while RQ is disabled */
+ iowrite32(0, &rq->ctrl->fetch_index);
return 0;
}
@@ -345,7 +347,6 @@ enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
iowrite32(rq->posted_index, &rq->ctrl->posted_index);
- iowrite32(0, &rq->ctrl->fetch_index);
rte_rmb();
rq->need_initial_post = false;
}
@@ -492,6 +493,42 @@ static void enic_rxq_intr_deinit(struct enic *enic)
}
}
+static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
+{
+ struct wq_enet_desc *desc;
+ struct vnic_wq *wq;
+ unsigned int i;
+
+ /*
+ * Fill WQ descriptor fields that never change. Every descriptor is
+ * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH
+ * descriptors (i.e. request one completion update every 32 packets).
+ */
+ wq = &enic->wq[queue_idx];
+ desc = (struct wq_enet_desc *)wq->ring.descs;
+ for (i = 0; i < wq->ring.desc_count; i++, desc++) {
+ desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT;
+ if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1)
+ desc->header_length_flags |=
+ (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT);
+ }
+}
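With ENIC_WQ_CQ_THRESH at 32, a 512-descriptor ring pre-marked this way generates at most 16 completion updates per full pass instead of one per packet, which is the point of setting CQ_ENTRY only on every 32nd descriptor.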
+
+static void pick_rx_handler(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev;
+
+ /* Use the non-scatter, simplified RX handler if possible. */
+ eth_dev = enic->rte_dev;
+ if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
+ PMD_INIT_LOG(DEBUG, " use the non-scatter Rx handler");
+ eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
+ } else {
+ PMD_INIT_LOG(DEBUG, " use the normal Rx handler");
+ eth_dev->rx_pkt_burst = &enic_recv_pkts;
+ }
+}
+
int enic_enable(struct enic *enic)
{
unsigned int index;
@@ -534,6 +571,22 @@ int enic_enable(struct enic *enic)
}
}
+ /*
+ * Use the simple TX handler if possible. All offloads must be
+ * disabled.
+ */
+ if (eth_dev->data->dev_conf.txmode.offloads == 0) {
+ PMD_INIT_LOG(DEBUG, " use the simple tx handler");
+ eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
+ for (index = 0; index < enic->wq_count; index++)
+ enic_prep_wq_for_simple_tx(enic, index);
+ } else {
+ PMD_INIT_LOG(DEBUG, " use the default tx handler");
+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+ }
+
+ pick_rx_handler(enic);
+
for (index = 0; index < enic->wq_count; index++)
enic_start_wq(enic, index);
for (index = 0; index < enic->rq_count; index++)
@@ -586,6 +639,19 @@ void enic_free_rq(void *rxq)
enic = vnic_dev_priv(rq_sop->vdev);
rq_data = &enic->rq[rq_sop->data_queue_idx];
+ if (rq_sop->free_mbufs) {
+ struct rte_mbuf **mb;
+ int i;
+
+ mb = rq_sop->free_mbufs;
+ for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs;
+ i < ENIC_RX_BURST_MAX; i++)
+ rte_pktmbuf_free(mb[i]);
+ rte_free(rq_sop->free_mbufs);
+ rq_sop->free_mbufs = NULL;
+ rq_sop->num_free_mbufs = 0;
+ }
+
enic_rxmbuf_queue_release(enic, rq_sop);
if (rq_data->in_use)
enic_rxmbuf_queue_release(enic, rq_data);
@@ -742,20 +808,20 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
}
/* number of descriptors has to be a multiple of 32 */
- nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
- nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
+ nb_sop_desc = (nb_desc / mbufs_per_pkt) & ENIC_ALIGN_DESCS_MASK;
+ nb_data_desc = (nb_desc - nb_sop_desc) & ENIC_ALIGN_DESCS_MASK;
rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
if (mbufs_per_pkt > 1) {
- min_sop = 64;
+ min_sop = ENIC_RX_BURST_MAX;
max_sop = ((enic->config.rq_desc_count /
- (mbufs_per_pkt - 1)) & ~0x1F);
+ (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK);
min_data = min_sop * (mbufs_per_pkt - 1);
max_data = enic->config.rq_desc_count;
} else {
- min_sop = 64;
+ min_sop = ENIC_RX_BURST_MAX;
max_sop = enic->config.rq_desc_count;
min_data = 0;
max_data = 0;
@@ -826,10 +892,21 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
goto err_free_sop_mbuf;
}
+ rq_sop->free_mbufs = (struct rte_mbuf **)
+ rte_zmalloc_socket("rq->free_mbufs",
+ sizeof(struct rte_mbuf *) *
+ ENIC_RX_BURST_MAX,
+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+ if (rq_sop->free_mbufs == NULL)
+ goto err_free_data_mbuf;
+ rq_sop->num_free_mbufs = 0;
+
rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
return 0;
+err_free_data_mbuf:
+ rte_free(rq_data->mbuf_ring);
err_free_sop_mbuf:
rte_free(rq_sop->mbuf_ring);
err_free_cq:
@@ -869,25 +946,15 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
static int instance;
wq->socket_id = socket_id;
- if (nb_desc) {
- if (nb_desc > enic->config.wq_desc_count) {
- dev_warning(enic,
- "WQ %d - number of tx desc in cmd line (%d)"\
- "is greater than that in the UCSM/CIMC adapter"\
- "policy. Applying the value in the adapter "\
- "policy (%d)\n",
- queue_idx, nb_desc, enic->config.wq_desc_count);
- } else if (nb_desc != enic->config.wq_desc_count) {
- enic->config.wq_desc_count = nb_desc;
- dev_info(enic,
- "TX Queues - effective number of descs:%d\n",
- nb_desc);
- }
- }
+ /*
+ * rte_eth_tx_queue_setup() checks min, max, and alignment. So just
+ * print an info message for diagnostics.
+ */
+ dev_info(enic, "TX Queues - effective number of descs:%d\n", nb_desc);
/* Allocate queue resources */
err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
- enic->config.wq_desc_count,
+ nb_desc,
sizeof(struct wq_enet_desc));
if (err) {
dev_err(enic, "error in allocation of wq\n");
@@ -895,7 +962,7 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
}
err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
- socket_id, enic->config.wq_desc_count,
+ socket_id, nb_desc,
sizeof(struct cq_enet_wq_desc));
if (err) {
vnic_wq_free(wq);
@@ -1197,7 +1264,7 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
- if (ENIC_SETTING(enic, RSSHASH_UDP_WEAK)) {
+ if (enic->udp_rss_weak) {
/*
* 'TCP' is not a typo. The "weak" version of
* UDP RSS requires both the TCP and UDP bits
@@ -1213,7 +1280,7 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
- if (ENIC_SETTING(enic, RSSHASH_UDP_WEAK))
+ if (enic->udp_rss_weak)
rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
}
} else {
@@ -1237,8 +1304,11 @@ int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
enic->rss_hf = rss_hf;
enic->rss_hash_type = rss_hash_type;
enic->rss_enable = rss_enable;
+ } else {
+		dev_err(enic, "Failed to update RSS configuration."
+ " hash=0x%x\n", rss_hash_type);
}
- return 0;
+ return ret;
}
int enic_set_vlan_strip(struct enic *enic)
@@ -1488,7 +1558,7 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
/* put back the real receive function */
rte_mb();
- eth_dev->rx_pkt_burst = enic_recv_pkts;
+ pick_rx_handler(enic);
rte_mb();
/* restart Rx traffic */
@@ -1591,7 +1661,18 @@ static int enic_dev_init(struct enic *enic)
PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_MASK;
enic->overlay_offload = true;
+ enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
dev_info(enic, "Overlay offload is enabled\n");
+ /*
+ * Reset the vxlan port to the default, as the NIC firmware
+ * does not reset it automatically and keeps the old setting.
+ */
+ if (vnic_dev_overlay_offload_cfg(enic->vdev,
+ OVERLAY_CFG_VXLAN_PORT_UPDATE,
+ ENIC_DEFAULT_VXLAN_PORT)) {
+ dev_err(enic, "failed to update vxlan port\n");
+ return -EINVAL;
+ }
}
return 0;
@@ -1643,8 +1724,10 @@ int enic_probe(struct enic *enic)
}
/* Set ingress vlan rewrite mode before vnic initialization */
+ dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n",
+ enic->ig_vlan_rewrite_mode);
err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
- IG_VLAN_REWRITE_MODE_PASS_THRU);
+ enic->ig_vlan_rewrite_mode);
if (err) {
dev_err(enic,
"Failed to set ingress vlan rewrite mode, aborting.\n");
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 6b404c3c..8d493ffe 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -82,6 +82,8 @@ int enic_get_vnic_config(struct enic *enic)
"Error getting filter modes, %d\n", err);
return err;
}
+ vnic_dev_capable_udp_rss_weak(enic->vdev, &enic->nic_cfg_chk,
+ &enic->udp_rss_weak);
dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s\n",
((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" :
@@ -124,7 +126,7 @@ int enic_get_vnic_config(struct enic *enic)
ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
ENIC_SETTING(enic, RSS) ?
(ENIC_SETTING(enic, RSSHASH_UDPIPV4) ? "+UDP" :
- ((ENIC_SETTING(enic, RSSHASH_UDP_WEAK) ? "+udp" :
+ ((enic->udp_rss_weak ? "+udp" :
"yes"))) : "no",
c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
@@ -161,7 +163,7 @@ int enic_get_vnic_config(struct enic *enic)
if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
ETH_RSS_IPV6_TCP_EX;
- if (ENIC_SETTING(enic, RSSHASH_UDP_WEAK))
+ if (enic->udp_rss_weak)
enic->flow_type_rss_offloads |=
ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
ETH_RSS_IPV6_UDP_EX;
@@ -181,7 +183,9 @@ int enic_get_vnic_config(struct enic *enic)
* Default hardware capabilities. enic_dev_init() may add additional
* flags if it enables overlay offloads.
*/
+ enic->tx_queue_offload_capa = 0;
enic->tx_offload_capa =
+ enic->tx_queue_offload_capa |
DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -235,6 +239,7 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en)
{
+ enum vnic_devcmd_cmd cmd;
u64 a0, a1;
u32 nic_cfg;
int wait = 1000;
@@ -245,8 +250,8 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
a0 = nic_cfg;
a1 = 0;
-
- return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+ cmd = enic->nic_cfg_chk ? CMD_NIC_CFG_CHK : CMD_NIC_CFG;
+ return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait);
}
int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index e68f1307..3786bc0e 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -16,6 +16,13 @@
#define ENIC_MIN_RQ_DESCS 64
#define ENIC_MAX_RQ_DESCS 4096
+/* A descriptor ring has a multiple of 32 descriptors */
+#define ENIC_ALIGN_DESCS 32
+#define ENIC_ALIGN_DESCS_MASK ~(ENIC_ALIGN_DESCS - 1)
+
+/* Request a completion index every 32 buffers (roughly packets) */
+#define ENIC_WQ_CQ_THRESH 32
+
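For example, with ENIC_ALIGN_DESCS at 32 the mask is ~0x1F, so nb_desc & ENIC_ALIGN_DESCS_MASK rounds a request of 100 descriptors down to 96; the named macros replace the bare ~0x1F literals previously used in enic_alloc_rq() without changing the arithmetic.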
#define ENIC_MIN_MTU 68
/* Does not include (possible) inserted VLAN tag and FCS */
@@ -30,6 +37,15 @@
#define ENIC_NON_TSO_MAX_DESC 16
#define ENIC_DEFAULT_RX_FREE_THRESH 32
#define ENIC_TX_XMIT_MAX 64
+#define ENIC_RX_BURST_MAX 64
+
+/* Defaults for dev_info.default_{rx,tx}portconf */
+#define ENIC_DEFAULT_RX_BURST 32
+#define ENIC_DEFAULT_RX_RINGS 1
+#define ENIC_DEFAULT_RX_RING_SIZE 512
+#define ENIC_DEFAULT_TX_BURST 32
+#define ENIC_DEFAULT_TX_RINGS 1
+#define ENIC_DEFAULT_TX_RING_SIZE 512
#define ENIC_RSS_DEFAULT_CPU 0
#define ENIC_RSS_BASE_CPU 0
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 8853a204..7129e121 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -137,51 +137,81 @@ enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
*/
static const uint32_t cq_type_table[128] __rte_cache_aligned = {
[0x00] = RTE_PTYPE_UNKNOWN,
+ [0x01] = RTE_PTYPE_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER,
[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
[0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG,
[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
[0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP,
[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
[0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP,
[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
[0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG,
- [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
- [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
- [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_FRAG,
[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
[0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG,
[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
[0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP,
[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
[0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP,
+ [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
/* All others reserved */
};
cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
@@ -280,7 +310,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
struct vnic_rq *rq;
struct enic *enic = vnic_dev_priv(sop_rq->vdev);
uint16_t cq_idx;
- uint16_t rq_idx;
+ uint16_t rq_idx, max_rx;
uint16_t rq_num;
struct rte_mbuf *nmb, *rxmb;
uint16_t nb_rx = 0;
@@ -295,19 +325,23 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
cq_idx = cq->to_clean; /* index of cqd, rqd, mbuf_table */
cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+ color = cq->last_color;
data_rq = &enic->rq[sop_rq->data_queue_idx];
- while (nb_rx < nb_pkts) {
+ /* Receive until the end of the ring, at most. */
+ max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx);
+
+ while (max_rx) {
volatile struct rq_enet_desc *rqd_ptr;
struct cq_desc cqd;
uint8_t packet_error;
uint16_t ciflags;
+ max_rx--;
+
/* Check for pkts available */
- color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
- & CQ_DESC_COLOR_MASK;
- if (color == cq->last_color)
+ if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
break;
/* Get the cq descriptor and extract rq info from it */
@@ -331,13 +365,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Get the mbuf to return and replace with one just allocated */
rxmb = rq->mbuf_ring[rq_idx];
rq->mbuf_ring[rq_idx] = nmb;
-
- /* Increment cqd, rqd, mbuf_table index */
cq_idx++;
- if (unlikely(cq_idx == cq->ring.desc_count)) {
- cq_idx = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
/* Prefetch next mbuf & desc while processing current one */
cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
@@ -389,6 +417,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
first_seg->packet_type =
enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
enic_cq_rx_to_pkt_flags(&cqd, first_seg);
+
/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
if (tnl) {
first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
@@ -408,6 +437,10 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* store the mbuf address into the next entry of the array */
rx_pkts[nb_rx++] = first_seg;
}
+ if (unlikely(cq_idx == cq->ring.desc_count)) {
+ cq_idx = 0;
+ cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+ }
sop_rq->pkt_first_seg = first_seg;
sop_rq->pkt_last_seg = last_seg;
@@ -441,9 +474,123 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
+uint16_t
+enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *mb, **rx, **rxmb;
+ uint16_t cq_idx, nb_rx, max_rx;
+ struct cq_enet_rq_desc *cqd;
+ struct rq_enet_desc *rqd;
+ unsigned int port_id;
+ struct vnic_cq *cq;
+ struct vnic_rq *rq;
+ struct enic *enic;
+ uint8_t color;
+ bool overlay;
+ bool tnl;
+
+ rq = rx_queue;
+ enic = vnic_dev_priv(rq->vdev);
+ cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ cq_idx = cq->to_clean;
+
+ /*
+ * Fill up the reserve of free mbufs. Below, we restock the receive
+ * ring with these mbufs to avoid allocation failures.
+ */
+ if (rq->num_free_mbufs == 0) {
+ if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
+ ENIC_RX_BURST_MAX))
+ return 0;
+ rq->num_free_mbufs = ENIC_RX_BURST_MAX;
+ }
+
+ /* Receive until the end of the ring, at most. */
+ max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
+ max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);
+
+ cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
+ color = cq->last_color;
+ rxmb = rq->mbuf_ring + cq_idx;
+ port_id = enic->port_id;
+ overlay = enic->overlay_offload;
+
+ rx = rx_pkts;
+ while (max_rx) {
+ max_rx--;
+ if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+ break;
+ if (unlikely(cqd->bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
+ rte_pktmbuf_free(*rxmb++);
+ rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+ cqd++;
+ continue;
+ }
+
+ mb = *rxmb++;
+ /* prefetch mbuf data for caller */
+ rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
+ RTE_PKTMBUF_HEADROOM));
+ mb->data_len = cqd->bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ mb->pkt_len = mb->data_len;
+ mb->port = port_id;
+ tnl = overlay && (cqd->completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
+ mb->packet_type =
+ enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
+ tnl);
+ enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
+ /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+ if (tnl) {
+ mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+ }
+ cqd++;
+ *rx++ = mb;
+ }
+ /* Number of descriptors visited */
+ nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
+ if (nb_rx == 0)
+ return 0;
+ rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
+ rxmb = rq->mbuf_ring + cq_idx;
+ cq_idx += nb_rx;
+ rq->rx_nb_hold += nb_rx;
+ if (unlikely(cq_idx == cq->ring.desc_count)) {
+ cq_idx = 0;
+ cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+ }
+ cq->to_clean = cq_idx;
+
+ memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
+ sizeof(struct rte_mbuf *) * nb_rx);
+ rq->num_free_mbufs -= nb_rx;
+ while (nb_rx) {
+ nb_rx--;
+ mb = *rxmb++;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+ rqd++;
+ }
+ if (rq->rx_nb_hold > rq->rx_free_thresh) {
+ rq->posted_index = enic_ring_add(rq->ring.desc_count,
+ rq->posted_index,
+ rq->rx_nb_hold);
+ rq->rx_nb_hold = 0;
+ rte_wmb();
+ iowrite32_relaxed(rq->posted_index,
+ &rq->ctrl->posted_index);
+ }
+
+ return rx - rx_pkts;
+}
+
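Both receive loops above detect new work with a completion-queue "color" bit rather than a producer index: the NIC inverts the bit on every pass over the CQ ring, and the consumer flips its expected color when it wraps (cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT). A minimal sketch of the convention, with assumed names and bit position:

	#include <stdint.h>

	#define COLOR_BIT 0x80	/* assumed position of the color bit */

	struct cq_hdr {
		uint8_t type_color;	/* type bits plus the color bit */
	};

	/* A descriptor is a fresh completion only while its color differs
	 * from the color the consumer saw on its previous pass; equal
	 * colors mean the NIC has not written this slot yet. */
	static int cq_entry_ready(const struct cq_hdr *d, uint8_t last_color)
	{
		return (d->type_color & COLOR_BIT) != last_color;
	}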
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
- struct vnic_wq_buf *buf;
+ struct rte_mbuf *buf;
struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
unsigned int nb_to_free, nb_free = 0, i;
struct rte_mempool *pool;
@@ -453,13 +600,10 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
+ 1;
tail_idx = wq->tail_idx;
- buf = &wq->bufs[tail_idx];
- pool = ((struct rte_mbuf *)buf->mb)->pool;
+ pool = wq->bufs[tail_idx]->pool;
for (i = 0; i < nb_to_free; i++) {
- buf = &wq->bufs[tail_idx];
- m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
- buf->mb = NULL;
-
+ buf = wq->bufs[tail_idx];
+ m = rte_pktmbuf_prefree_seg(buf);
if (unlikely(m == NULL)) {
tail_idx = enic_ring_incr(desc_count, tail_idx);
continue;
@@ -508,6 +652,10 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (i = 0; i != nb_pkts; i++) {
m = tx_pkts[i];
+ if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+ rte_errno = EINVAL;
+ return i;
+ }
ol_flags = m->ol_flags;
if (ol_flags & wq->tx_offload_notsup_mask) {
rte_errno = ENOTSUP;
@@ -544,12 +692,11 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t ol_flags_mask;
unsigned int wq_desc_avail;
int head_idx;
- struct vnic_wq_buf *buf;
unsigned int desc_count;
struct wq_enet_desc *descs, *desc_p, desc_tmp;
uint16_t mss;
uint8_t vlan_tag_insert;
- uint8_t eop;
+ uint8_t eop, cq;
uint64_t bus_addr;
uint8_t offload_mode;
uint16_t header_len;
@@ -632,15 +779,18 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
break;
}
}
-
-
+ wq->cq_pend++;
+ cq = 0;
+ if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
+ cq = 1;
+ wq->cq_pend = 0;
+ }
wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
- offload_mode, eop, eop, 0, vlan_tag_insert,
+ offload_mode, eop, cq, 0, vlan_tag_insert,
vlan_id, 0);
*desc_p = desc_tmp;
- buf = &wq->bufs[head_idx];
- buf->mb = (void *)tx_pkt;
+ wq->bufs[head_idx] = tx_pkt;
head_idx = enic_ring_incr(desc_count, head_idx);
wq_desc_avail--;
@@ -649,20 +799,26 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_pkt->next) {
data_len = tx_pkt->data_len;
- if (tx_pkt->next == NULL)
+ wq->cq_pend++;
+ cq = 0;
+ if (tx_pkt->next == NULL) {
eop = 1;
+ if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
+ cq = 1;
+ wq->cq_pend = 0;
+ }
+ }
desc_p = descs + head_idx;
bus_addr = (dma_addr_t)(tx_pkt->buf_iova
+ tx_pkt->data_off);
wq_enet_desc_enc((struct wq_enet_desc *)
&desc_tmp, bus_addr, data_len,
- mss, 0, offload_mode, eop, eop,
+ mss, 0, offload_mode, eop, cq,
0, vlan_tag_insert, vlan_id,
0);
*desc_p = desc_tmp;
- buf = &wq->bufs[head_idx];
- buf->mb = (void *)tx_pkt;
+ wq->bufs[head_idx] = tx_pkt;
head_idx = enic_ring_incr(desc_count, head_idx);
wq_desc_avail--;
}
@@ -678,4 +834,81 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return index;
}
+static void enqueue_simple_pkts(struct rte_mbuf **pkts,
+ struct wq_enet_desc *desc,
+ uint16_t n,
+ struct enic *enic)
+{
+ struct rte_mbuf *p;
+
+ while (n) {
+ n--;
+ p = *pkts++;
+ desc->address = p->buf_iova + p->data_off;
+ desc->length = p->pkt_len;
+ /*
+		 * The app should not send oversized packets.
+		 * tx_pkt_prepare includes a check as well, but some
+		 * apps ignore the device max size and tx_pkt_prepare.
+		 * Oversized packets cause WQ errors and the NIC ends
+		 * up disabling the whole WQ. So truncate packets.
+		 */
+ if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+ desc->length = ENIC_TX_MAX_PKT_SIZE;
+ rte_atomic64_inc(&enic->soft_stats.tx_oversized);
+ }
+ desc++;
+ }
+}
+
+uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ unsigned int head_idx, desc_count;
+ struct wq_enet_desc *desc;
+ struct vnic_wq *wq;
+ struct enic *enic;
+ uint16_t rem, n;
+
+ wq = (struct vnic_wq *)tx_queue;
+ enic = vnic_dev_priv(wq->vdev);
+ enic_cleanup_wq(enic, wq);
+ /* Will enqueue this many packets in this call */
+ nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail);
+ if (nb_pkts == 0)
+ return 0;
+
+ head_idx = wq->head_idx;
+ desc_count = wq->ring.desc_count;
+
+ /* Descriptors until the end of the ring */
+ n = desc_count - head_idx;
+ n = RTE_MIN(nb_pkts, n);
+
+ /* Save mbuf pointers to free later */
+ memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n);
+
+ /* Enqueue until the ring end */
+ rem = nb_pkts - n;
+ desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx;
+ enqueue_simple_pkts(tx_pkts, desc, n, enic);
+
+ /* Wrap to the start of the ring */
+ if (rem) {
+ tx_pkts += n;
+ memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem);
+ desc = (struct wq_enet_desc *)wq->ring.descs;
+ enqueue_simple_pkts(tx_pkts, desc, rem, enic);
+ }
+ rte_wmb();
+ /* Update head_idx and desc_avail */
+ wq->ring.desc_avail -= nb_pkts;
+ head_idx += nb_pkts;
+ if (head_idx >= desc_count)
+ head_idx -= desc_count;
+ wq->head_idx = head_idx;
+ iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
+ return nb_pkts;
+}
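For example, with a 512-entry ring and head_idx at 500, a 32-packet burst is split into n = 12 descriptors at the tail of the ring and rem = 20 wrapping to index 0, so both memcpy() calls and both enqueue_simple_pkts() passes operate on contiguous runs.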
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index eafbb75d..657919f9 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -328,6 +328,7 @@ rte_pmd_failsafe_probe(struct rte_vdev_device *vdev)
}
/* TODO: request info from primary to set up Rx and Tx */
eth_dev->dev_ops = &failsafe_ops;
+ eth_dev->device = &vdev->device;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -353,9 +354,7 @@ static struct rte_vdev_driver failsafe_drv = {
RTE_PMD_REGISTER_VDEV(net_failsafe, failsafe_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_failsafe, PMD_FAILSAFE_PARAM_STRING);
-RTE_INIT(failsafe_init_log);
-static void
-failsafe_init_log(void)
+RTE_INIT(failsafe_init_log)
{
failsafe_logtype = rte_log_register("pmd.net.failsafe");
if (failsafe_logtype >= 0)
diff --git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c
index 2c002b16..626883ce 100644
--- a/drivers/net/failsafe/failsafe_args.c
+++ b/drivers/net/failsafe/failsafe_args.c
@@ -63,7 +63,7 @@ fs_parse_device(struct sub_device *sdev, char *args)
d = &sdev->devargs;
DEBUG("%s", args);
- ret = rte_devargs_parse(d, "%s", args);
+ ret = rte_devargs_parse(d, args);
if (ret) {
DEBUG("devargs parsing failed with code %d", ret);
return ret;
diff --git a/drivers/net/failsafe/failsafe_eal.c b/drivers/net/failsafe/failsafe_eal.c
index 5672f396..ce1633f1 100644
--- a/drivers/net/failsafe/failsafe_eal.c
+++ b/drivers/net/failsafe/failsafe_eal.c
@@ -86,7 +86,7 @@ fs_bus_init(struct rte_eth_dev *dev)
else
snprintf(devstr, sizeof(devstr), "%s",
rte_eth_devices[pid].device->name);
- ret = rte_devargs_parse(da, "%s", devstr);
+ ret = rte_devargs_parse(da, devstr);
if (ret) {
ERROR("Probed devargs parsing failed with code"
" %d", ret);
diff --git a/drivers/net/failsafe/meson.build b/drivers/net/failsafe/meson.build
new file mode 100644
index 00000000..a249ff4a
--- /dev/null
+++ b/drivers/net/failsafe/meson.build
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+cflags += '-std=gnu99'
+cflags += '-D_DEFAULT_SOURCE'
+cflags += '-D_XOPEN_SOURCE=700'
+cflags += '-pedantic'
+if host_machine.system() == 'linux'
+ cflags += '-DLINUX'
+else
+ cflags += '-DBSD'
+endif
+
+allow_experimental_apis = true
+
+sources = files('failsafe_args.c',
+ 'failsafe.c',
+ 'failsafe_eal.c',
+ 'failsafe_ether.c',
+ 'failsafe_flow.c',
+ 'failsafe_intr.c',
+ 'failsafe_ops.c',
+ 'failsafe_rxtx.c')
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index ef307809..dc814855 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -106,9 +106,6 @@
#define FM10K_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define FM10K_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
-#define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
struct fm10k_macvlan_filter_info {
uint16_t vlan_num; /* Total VLAN number */
uint16_t mac_num; /* Total mac number */
@@ -329,6 +326,13 @@ uint16_t fm10k_recv_scattered_pkts(void *rx_queue,
int
fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int
+fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+
+int
+fm10k_dev_tx_descriptor_status(void *rx_queue, uint16_t offset);
+
+
uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 3ff1b0e0..541a49b7 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -451,8 +451,10 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if ((dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP) == 0)
+	/* KEEP_CRC offload flag is not supported by the PMD.
+	 * This block can be removed when DEV_RX_OFFLOAD_CRC_STRIP is removed.
+	 */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
/* multiple queue mode checking */
@@ -808,52 +810,50 @@ static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int err = -1;
+ int err;
uint32_t reg;
struct fm10k_rx_queue *rxq;
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
- err = rx_queue_reset(rxq);
- if (err == -ENOMEM) {
- PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
- return err;
- } else if (err == -EINVAL) {
- PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
- " %d", err);
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
+ err = rx_queue_reset(rxq);
+ if (err == -ENOMEM) {
+ PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
+ return err;
+ } else if (err == -EINVAL) {
+ PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
+ " %d", err);
+ return err;
+ }
- /* Setup the HW Rx Head and Tail Descriptor Pointers
- * Note: this must be done AFTER the queue is enabled on real
- * hardware, but BEFORE the queue is enabled when using the
- * emulation platform. Do it in both places for now and remove
- * this comment and the following two register writes when the
- * emulation platform is no longer being used.
- */
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled on real
+ * hardware, but BEFORE the queue is enabled when using the
+ * emulation platform. Do it in both places for now and remove
+ * this comment and the following two register writes when the
+ * emulation platform is no longer being used.
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
- /* Set PF ownership flag for PF devices */
- reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
- if (hw->mac.type == fm10k_mac_pf)
- reg |= FM10K_RXQCTL_PF;
- reg |= FM10K_RXQCTL_ENABLE;
- /* enable RX queue */
- FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
- FM10K_WRITE_FLUSH(hw);
+ /* Set PF ownership flag for PF devices */
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+ if (hw->mac.type == fm10k_mac_pf)
+ reg |= FM10K_RXQCTL_PF;
+ reg |= FM10K_RXQCTL_ENABLE;
+ /* enable RX queue */
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+ FM10K_WRITE_FLUSH(hw);
- /* Setup the HW Rx Head and Tail Descriptor Pointers
- * Note: this must be done AFTER the queue is enabled
- */
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- }
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -863,14 +863,12 @@ fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- /* Disable RX queue */
- rx_queue_disable(hw, rx_queue_id);
+ /* Disable RX queue */
+ rx_queue_disable(hw, rx_queue_id);
- /* Free mbuf and clean HW ring */
- rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
+ /* Free mbuf and clean HW ring */
+ rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -882,28 +880,23 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
- int err = 0;
+ struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
-
- q->ops->reset(q);
+ q->ops->reset(q);
- /* reset head and tail pointers */
- FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
+ /* reset head and tail pointers */
+ FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
- /* enable TX queue */
- FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
- FM10K_TXDCTL_ENABLE | txdctl);
- FM10K_WRITE_FLUSH(hw);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
- err = -1;
+ /* enable TX queue */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+ FM10K_TXDCTL_ENABLE | txdctl);
+ FM10K_WRITE_FLUSH(hw);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -913,11 +906,9 @@ fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- tx_queue_disable(hw, tx_queue_id);
- tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
+ tx_queue_disable(hw, tx_queue_id);
+ tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -2837,6 +2828,8 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
.tx_queue_setup = fm10k_tx_queue_setup,
.tx_queue_release = fm10k_tx_queue_release,
.rx_descriptor_done = fm10k_dev_rx_descriptor_done,
+ .rx_descriptor_status = fm10k_dev_rx_descriptor_status,
+ .tx_descriptor_status = fm10k_dev_tx_descriptor_status,
.rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
.reta_update = fm10k_reta_update,
@@ -3290,9 +3283,7 @@ RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(fm10k_init_log);
-static void
-fm10k_init_log(void)
+RTE_INIT(fm10k_init_log)
{
fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
if (fm10k_logtype_init >= 0)
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 9320748c..4a5b46ec 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -389,6 +389,84 @@ fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
return ret;
}
+int
+fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ volatile union fm10k_rx_desc *rxdp;
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint16_t nb_hold, trigger_last;
+ uint16_t desc;
+ int ret;
+
+ if (unlikely(offset >= rxq->nb_desc)) {
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
+ return 0;
+ }
+
+ if (rxq->next_trigger < rxq->alloc_thresh)
+ trigger_last = rxq->next_trigger +
+ rxq->nb_desc - rxq->alloc_thresh;
+ else
+ trigger_last = rxq->next_trigger - rxq->alloc_thresh;
+
+ if (rxq->next_dd < trigger_last)
+ nb_hold = rxq->next_dd + rxq->nb_desc - trigger_last;
+ else
+ nb_hold = rxq->next_dd - trigger_last;
+
+ if (offset >= rxq->nb_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->next_dd + offset;
+ if (desc >= rxq->nb_desc)
+ desc -= rxq->nb_desc;
+
+ rxdp = &rxq->hw_ring[desc];
+
+ ret = !!(rxdp->w.status &
+ rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
+
+ return ret;
+}
+
+int
+fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ volatile struct fm10k_tx_desc *txdp;
+ struct fm10k_tx_queue *txq = tx_queue;
+ uint16_t desc;
+ uint16_t next_rs = txq->nb_desc;
+ struct fifo rs_tracker = txq->rs_tracker;
+ struct fifo *r = &rs_tracker;
+
+ if (unlikely(offset >= txq->nb_desc))
+ return -EINVAL;
+
+ desc = txq->next_free + offset;
+ /* go to next desc that has the RS bit */
+ desc = (desc / txq->rs_thresh + 1) *
+ txq->rs_thresh - 1;
+
+ if (desc >= txq->nb_desc) {
+ desc -= txq->nb_desc;
+ if (desc >= txq->nb_desc)
+ desc -= txq->nb_desc;
+ }
+
+ r->head = r->list;
+ for ( ; r->head != r->endp; ) {
+ if (*r->head >= desc && *r->head < next_rs)
+ next_rs = *r->head;
+ ++r->head;
+ }
+
+ txdp = &txq->hw_ring[next_rs];
+ if (txdp->flags & FM10K_TXD_FLAG_DONE)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
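For context, these two functions back the generic rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status() calls. A minimal polling sketch, assuming a started port and queue:

	#include <rte_ethdev.h>

	/* Count descriptors the NIC has completed but the application has
	 * not yet consumed, by probing offsets until the first slot that
	 * is not DONE. */
	static uint16_t rx_backlog(uint16_t port_id, uint16_t queue_id)
	{
		uint16_t off = 0;

		while (rte_eth_rx_descriptor_status(port_id, queue_id, off) ==
				RTE_ETH_RX_DESC_DONE)
			off++;
		return off;
	}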
/*
* Free multiple TX mbuf at a time if they are in the same pool
*
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 13c5d329..85a6a867 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -42,6 +42,8 @@
#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
+#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
+#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
#define I40E_CLEAR_PXE_WAIT_MS 200
@@ -401,6 +403,13 @@ static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
int i40e_logtype_init;
int i40e_logtype_driver;
+static const char *const valid_keys[] = {
+ ETH_I40E_FLOATING_VEB_ARG,
+ ETH_I40E_FLOATING_VEB_LIST_ARG,
+ ETH_I40E_SUPPORT_MULTI_DRIVER,
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG,
+ NULL};
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -698,12 +707,16 @@ i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
uint32_t reg_val)
{
uint32_t ori_reg_val;
+ struct rte_eth_dev *dev;
ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
i40e_write_rx_ctl(hw, reg_addr, reg_val);
- PMD_DRV_LOG(DEBUG,
- "Global register [0x%08x] original: 0x%08x, after: 0x%08x",
- reg_addr, ori_reg_val, reg_val);
+ if (ori_reg_val != reg_val)
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%08x, new: 0x%08x",
+ dev->device->name, reg_addr, ori_reg_val, reg_val);
}
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
@@ -730,7 +743,6 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
*/
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
- i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
}
static inline void i40e_config_automask(struct i40e_pf *pf)
@@ -849,7 +861,7 @@ config_vf_floating_veb(struct rte_devargs *devargs,
if (devargs == NULL)
return;
- kvlist = rte_kvargs_parse(devargs->args, NULL);
+ kvlist = rte_kvargs_parse(devargs->args, valid_keys);
if (kvlist == NULL)
return;
@@ -890,7 +902,7 @@ is_floating_veb_supported(struct rte_devargs *devargs)
if (devargs == NULL)
return 0;
- kvlist = rte_kvargs_parse(devargs->args, NULL);
+ kvlist = rte_kvargs_parse(devargs->args, valid_keys);
if (kvlist == NULL)
return 0;
@@ -1097,8 +1109,6 @@ i40e_init_queue_region_conf(struct rte_eth_dev *dev)
memset(info, 0, sizeof(struct i40e_queue_regions));
}
-#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
-
static int
i40e_parse_multi_drv_handler(__rte_unused const char *key,
const char *value,
@@ -1130,9 +1140,8 @@ static int
i40e_support_multi_driver(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- static const char *const valid_keys[] = {
- ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
struct rte_kvargs *kvlist;
+ int kvargs_count;
/* Enable global configuration by default */
pf->support_multi_driver = false;
@@ -1144,7 +1153,13 @@ i40e_support_multi_driver(struct rte_eth_dev *dev)
if (!kvlist)
return -EINVAL;
- if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
"the first invalid or last valid one is used !",
ETH_I40E_SUPPORT_MULTI_DRIVER);
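As with the other devargs, the key is given per device on the command line (hypothetical address), e.g. -w 0000:02:00.0,support-multi-driver=1; passing it more than once triggers the warning above.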
@@ -1165,6 +1180,7 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
uint64_t ori_reg_val;
+ struct rte_eth_dev *dev;
int ret;
ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
@@ -1174,11 +1190,13 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw,
reg_addr);
return -EIO;
}
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
- PMD_DRV_LOG(DEBUG,
- "Global register [0x%08x] original: 0x%"PRIx64
- ", after: 0x%"PRIx64,
- reg_addr, ori_reg_val, reg_val);
+ if (ori_reg_val != reg_val)
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%"PRIx64", after: 0x%"PRIx64,
+ dev->device->name, reg_addr, ori_reg_val, reg_val);
return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
}
@@ -1236,6 +1254,13 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
hw->bus.func = pci_dev->addr.function;
hw->adapter_stopped = 0;
+ /*
+ * Switch Tag value should not be identical to either the First Tag
+ * or Second Tag values. So set something other than common Ethertype
+ * for internal switching.
+ */
+ hw->switch_tag = 0xffff;
+
/* Check if need to support multi-driver */
i40e_support_multi_driver(dev);
@@ -1299,7 +1324,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
PMD_INIT_LOG(DEBUG,
"Global register 0x%08x is changed with 0x28",
I40E_GLQF_L3_MAP(40));
- i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER);
}
/* Need the special FW version to support floating VEB */
@@ -1586,7 +1610,6 @@ void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
- i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD);
}
static int
@@ -1829,8 +1852,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
/* Write first RX queue to Link list register as the head element */
if (vsi->type != I40E_VSI_SRIOV) {
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1,
- pf->support_multi_driver);
+ i40e_calc_itr_interval(1, pf->support_multi_driver);
if (msix_vect == I40E_MISC_VEC_ID) {
I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
@@ -2026,27 +2048,40 @@ i40e_phy_conf_link(struct i40e_hw *hw,
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
enum i40e_aq_phy_type cnt;
+ uint8_t avail_speed;
uint32_t phy_type_mask = 0;
const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_LOW_POWER;
- const uint8_t advt = I40E_LINK_SPEED_40GB |
- I40E_LINK_SPEED_25GB |
- I40E_LINK_SPEED_10GB |
- I40E_LINK_SPEED_1GB |
- I40E_LINK_SPEED_100MB;
int ret = -ENOTSUP;
+ /* To get phy capabilities of available speeds. */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
+ NULL);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
+ status);
+ return ret;
+ }
+ avail_speed = phy_ab.link_speed;
+ /* To get the current phy config. */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
NULL);
- if (status)
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
+ status);
return ret;
+ }
- /* If link already up, no need to set up again */
- if (is_up && phy_ab.phy_type != 0)
+	/* If the link needs to go up, it is in autoneg mode, and the speed
+	 * is valid, there is no need to set it up again.
+	 */
+ if (is_up && phy_ab.phy_type != 0 &&
+ abilities & I40E_AQ_PHY_AN_ENABLED &&
+ phy_ab.link_speed != 0)
return I40E_SUCCESS;
memset(&phy_conf, 0, sizeof(phy_conf));
@@ -2055,18 +2090,20 @@ i40e_phy_conf_link(struct i40e_hw *hw,
abilities &= ~mask;
abilities |= phy_ab.abilities & mask;
- /* update ablities and speed */
- if (abilities & I40E_AQ_PHY_AN_ENABLED)
- phy_conf.link_speed = advt;
- else
- phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
-
phy_conf.abilities = abilities;
+ /* If the link needs to go up but the forced speed is not supported,
+ * warn the user and configure the default available speeds.
+ */
+ if (is_up && !(force_speed & avail_speed)) {
+ PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
+ phy_conf.link_speed = avail_speed;
+ } else {
+ phy_conf.link_speed = is_up ? force_speed : avail_speed;
+ }
-
- /* To enable link, phy_type mask needs to include each type */
- for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+ /* PHY type mask needs to include each type except PHY type extension */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
phy_type_mask |= 1 << cnt;
/* use get_phy_abilities_resp value for the rest */
@@ -2099,11 +2136,18 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *conf = &dev->data->dev_conf;
+ if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ conf->link_speeds = ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_100M;
+ }
speed = i40e_parse_link_speeds(conf->link_speeds);
- abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
- abilities |= I40E_AQ_PHY_AN_ENABLED;
- abilities |= I40E_AQ_PHY_LINK_ENABLED;
+ abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
+ I40E_AQ_PHY_AN_ENABLED |
+ I40E_AQ_PHY_LINK_ENABLED;
return i40e_phy_conf_link(hw, abilities, speed, true);
}
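
Note: with the hunk above, link_speeds == 0 now means "autonegotiate everything" instead of being rejected. A hedged sketch of that expansion follows; the LINK_SPEED_* flags are illustrative bit values, not the exact rte_ethdev definitions.

	/* Hedged sketch of the autoneg expansion; flag values illustrative. */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define LINK_SPEED_AUTONEG 0u
	#define LINK_SPEED_100M (1u << 0)
	#define LINK_SPEED_1G   (1u << 1)
	#define LINK_SPEED_10G  (1u << 2)
	#define LINK_SPEED_20G  (1u << 3)
	#define LINK_SPEED_25G  (1u << 4)
	#define LINK_SPEED_40G  (1u << 5)

	static uint32_t
	expand_autoneg(uint32_t link_speeds)
	{
		/* 0 means autoneg: advertise every speed the port may support. */
		if (link_speeds == LINK_SPEED_AUTONEG)
			return LINK_SPEED_100M | LINK_SPEED_1G | LINK_SPEED_10G |
			       LINK_SPEED_20G | LINK_SPEED_25G | LINK_SPEED_40G;
		return link_speeds;
	}

	int
	main(void)
	{
		printf("autoneg -> 0x%"PRIx32"\n", expand_autoneg(LINK_SPEED_AUTONEG));
		printf("fixed   -> 0x%"PRIx32"\n", expand_autoneg(LINK_SPEED_10G));
		return 0;
	}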
@@ -2220,13 +2264,6 @@ i40e_dev_start(struct rte_eth_dev *dev)
}
/* Apply link configure */
- if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G)) {
- PMD_DRV_LOG(ERR, "Invalid link setting");
- goto err_up;
- }
ret = i40e_apply_link_speed(dev);
if (I40E_SUCCESS != ret) {
PMD_DRV_LOG(ERR, "Fail to apply link setting");
@@ -3327,6 +3364,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -3501,8 +3539,6 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
"Global register 0x%08x is changed with value 0x%08x",
I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
- i40e_global_cfg_warning(I40E_WARNING_TPID);
-
return 0;
}
@@ -3797,7 +3833,6 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
<< I40E_KILOSHIFT);
- i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
} else {
PMD_DRV_LOG(ERR,
"Water marker configuration is not supported.");
@@ -4334,7 +4369,6 @@ i40e_get_cap(struct i40e_hw *hw)
}
#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
-#define QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
static int i40e_pf_parse_vf_queue_number_handler(const char *key,
const char *value,
@@ -4368,9 +4402,9 @@ static int i40e_pf_parse_vf_queue_number_handler(const char *key,
static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
{
- static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct rte_kvargs *kvlist;
+ int kvargs_count;
/* set default queue number per VF as 4 */
pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
@@ -4382,12 +4416,18 @@ static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
if (kvlist == NULL)
return -(EINVAL);
- if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
"the first invalid or last valid one is used !",
- QUEUE_NUM_PER_VF_ARG);
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG);
- rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
+ rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
i40e_pf_parse_vf_queue_number_handler, pf);
rte_kvargs_free(kvlist);
@@ -7565,6 +7605,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7608,13 +7649,14 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -7624,6 +7666,7 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7652,10 +7695,13 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* For MPLSoGRE */
memset(&filter_replace, 0,
@@ -7678,13 +7724,14 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -7694,6 +7741,7 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7729,10 +7777,13 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* for GTP-U */
memset(&filter_replace, 0,
@@ -7761,13 +7812,14 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -7777,6 +7829,7 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7804,10 +7857,13 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* for GTP-U */
memset(&filter_replace, 0,
@@ -7829,13 +7885,14 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -8395,7 +8452,6 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
"with value 0x%08x",
I40E_GL_PRS_FVBM(2), reg);
- i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
} else {
ret = 0;
}
@@ -8661,7 +8717,6 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
I40E_GLQF_HSYM(j),
reg);
}
- i40e_global_cfg_warning(I40E_WARNING_HSYM);
}
}
@@ -8687,7 +8742,6 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
goto out;
i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
- i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
out:
I40E_WRITE_FLUSH(hw);
@@ -9280,12 +9334,17 @@ void
i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
+ struct rte_eth_dev *dev;
- if (reg != val)
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ if (reg != val) {
i40e_write_rx_ctl(hw, addr, val);
- PMD_DRV_LOG(DEBUG,
- "Global register [0x%08x] original: 0x%08x, after: 0x%08x",
- addr, reg, (uint32_t)i40e_read_rx_ctl(hw, addr));
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%08x, new: 0x%08x",
+ dev->device->name, addr, reg,
+ (uint32_t)i40e_read_rx_ctl(hw, addr));
+ }
}
static void
@@ -9359,12 +9418,6 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
pf->hash_input_set[pctype] = input_set;
pf->fdir.input_set[pctype] = input_set;
}
-
- if (!pf->support_multi_driver) {
- i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
- i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
- i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
- }
}
int
@@ -9430,7 +9483,6 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
- i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
for (i = 0; i < num; i++)
i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
@@ -9439,7 +9491,6 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
0);
- i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
I40E_WRITE_FLUSH(hw);
pf->hash_input_set[pctype] = input_set;
@@ -9520,7 +9571,6 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
i40e_check_write_global_reg(hw,
I40E_GLQF_FD_MSK(i, pctype),
0);
- i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
} else {
PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
}
@@ -10003,6 +10053,60 @@ i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
#define I40E_GL_SWR_PM_UP_THR 0x269FBC
+/*
+ * GL_SWR_PM_UP_THR:
+ * The value is not impacted by the link speed; it is set according
+ * to the total number of ports for a better pipe-monitor configuration.
+ */
+static bool
+i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
+{
+#define I40E_GL_SWR_PM_EF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
+
+#define I40E_GL_SWR_PM_SF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
+
+ static const struct {
+ uint16_t device_id;
+ uint32_t val;
+ } swr_pm_table[] = {
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
+
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
+ };
+ uint32_t i;
+
+ if (value == NULL) {
+ PMD_DRV_LOG(ERR, "value is NULL");
+ return false;
+ }
+
+ for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
+ if (hw->device_id == swr_pm_table[i].device_id) {
+ *value = swr_pm_table[i].val;
+
+ PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
+ "value - 0x%08x",
+ hw->device_id, *value);
+ return true;
+ }
+ }
+
+ return false;
+}
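
Note: the function above replaces a PHY-type heuristic with an explicit device-id lookup table. A self-contained sketch of the same pattern follows; the device ids and threshold values below are made up for illustration.

	/* Self-contained sketch of the device-id lookup; ids and values
	 * are hypothetical. */
	#include <inttypes.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ARRAY_DIM(a) (sizeof(a) / sizeof((a)[0]))

	static bool
	lookup_pm_thr(uint16_t device_id, uint32_t *value)
	{
		static const struct { uint16_t id; uint32_t val; } table[] = {
			{ 0x1572, 0x03030303 }, /* hypothetical "EF"-class part */
			{ 0x1580, 0x06060606 }, /* hypothetical "SF"-class part */
		};
		size_t i;

		for (i = 0; i < ARRAY_DIM(table); i++) {
			if (table[i].id == device_id) {
				*value = table[i].val;
				return true;
			}
		}
		return false; /* unknown device: caller skips the fixup */
	}

	int
	main(void)
	{
		uint32_t v;

		if (lookup_pm_thr(0x1572, &v))
			printf("fixup value: 0x%08"PRIx32"\n", v);
		if (!lookup_pm_thr(0xffff, &v))
			printf("unknown device, no fixup\n");
		return 0;
	}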
+
static int
i40e_dev_sync_phy_type(struct i40e_hw *hw)
{
@@ -10067,13 +10171,16 @@ i40e_configure_registers(struct i40e_hw *hw)
}
if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
- I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_SF_VALUE;
- else /* For X710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_EF_VALUE;
+ uint32_t cfg_val;
+
+ if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
+ PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
+ "GL_SWR_PM_UP_THR value fixup",
+ hw->device_id);
+ continue;
+ }
+
+ reg_table[i].val = cfg_val;
}
ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
@@ -12070,7 +12177,8 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_TUNNEL_GRENAT;
in_tunnel = true;
- } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
+ } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
+ !strncasecmp(name, "L2TPV2", 6)) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_TUNNEL_L2TP;
in_tunnel = true;
@@ -12214,6 +12322,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
if (pf->support_multi_driver) {
PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
@@ -12250,10 +12359,14 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
&filter_replace_buf);
if (ret != I40E_SUCCESS)
return ret;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* Apply the second L2 cloud filter */
memset(&filter_replace, 0,
@@ -12275,13 +12388,14 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!ret) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
+ if (!ret && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return ret;
}
@@ -12399,9 +12513,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
return 0;
}
-RTE_INIT(i40e_init_log);
-static void
-i40e_init_log(void)
+RTE_INIT(i40e_init_log)
{
i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
if (i40e_logtype_init >= 0)
@@ -12412,5 +12524,7 @@ i40e_init_log(void)
}
RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
- QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
+ ETH_I40E_FLOATING_VEB_ARG "=1"
+ ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 11c4c76b..3fffe5a5 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -28,6 +28,7 @@
#define I40E_NUM_DESC_ALIGN 32
#define I40E_BUF_SIZE_MIN 1024
#define I40E_FRAME_SIZE_MAX 9728
+#define I40E_TSO_FRAME_SIZE_MAX 262144
#define I40E_QUEUE_BASE_ADDR_UNIT 128
/* number of VSIs and queue default setting */
#define I40E_MAX_QP_NUM_PER_VF 16
@@ -87,12 +88,18 @@
#define I40E_WRITE_GLB_REG(hw, reg, value) \
do { \
uint32_t ori_val; \
+ struct rte_eth_dev *dev; \
ori_val = I40E_READ_REG((hw), (reg)); \
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev; \
I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), \
(reg)), (value)); \
- PMD_DRV_LOG(DEBUG, "global register [0x%08x] " \
- "original: 0x%08x, after: 0x%08x ", \
- (reg), (ori_val), (value)); \
+ if (ori_val != value) \
+ PMD_DRV_LOG(WARNING, \
+ "i40e device %s changed global " \
+ "register [0x%08x]. original: 0x%08x, " \
+ "new: 0x%08x ", \
+ (dev->device->name), (reg), \
+ (ori_val), (value)); \
} while (0)
/* index flex payload per layer */
@@ -178,7 +185,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_ITR_INDEX_NONE 3
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
-#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 8160 /* 8160 us */
+#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
/* Special FW support this floating VEB feature */
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
@@ -1108,22 +1115,6 @@ struct i40e_valid_pattern {
parse_filter_t parse_filter;
};
-enum I40E_WARNING_IDX {
- I40E_WARNING_DIS_FLX_PLD,
- I40E_WARNING_ENA_FLX_PLD,
- I40E_WARNING_QINQ_PARSER,
- I40E_WARNING_QINQ_CLOUD_FILTER,
- I40E_WARNING_TPID,
- I40E_WARNING_FLOW_CTL,
- I40E_WARNING_GRE_KEY_LEN,
- I40E_WARNING_QF_CTL,
- I40E_WARNING_HASH_INSET,
- I40E_WARNING_HSYM,
- I40E_WARNING_HASH_MSK,
- I40E_WARNING_FD_MSK,
- I40E_WARNING_RPL_CLD_FILTER,
-};
-
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
@@ -1328,50 +1319,23 @@ i40e_align_floor(int n)
}
static inline uint16_t
-i40e_calc_itr_interval(int16_t interval, bool is_pf, bool is_multi_drv)
+i40e_calc_itr_interval(bool is_pf, bool is_multi_drv)
{
- if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) {
- if (is_multi_drv) {
- interval = I40E_QUEUE_ITR_INTERVAL_MAX;
- } else {
- if (is_pf)
- interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
- else
- interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT;
- }
+ uint16_t interval = 0;
+
+ if (is_multi_drv) {
+ interval = I40E_QUEUE_ITR_INTERVAL_MAX;
+ } else {
+ if (is_pf)
+ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+ else
+ interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT;
}
/* Convert to hardware count, as writing each 1 represents 2 us */
return interval / 2;
}
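
Note: the ITR computation now reduces to a fixed per-mode default halved into 2 us hardware units. A sketch mirroring the header constants follows (values copied from the diff; the function name is a stand-in).

	/* Sketch of the simplified ITR computation. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define QUEUE_ITR_INTERVAL_DEFAULT    32   /* us */
	#define QUEUE_ITR_INTERVAL_MAX        8160 /* us */
	#define VF_QUEUE_ITR_INTERVAL_DEFAULT 32   /* us */

	static uint16_t
	calc_itr_interval(bool is_pf, bool is_multi_drv)
	{
		uint16_t interval;

		if (is_multi_drv)
			interval = QUEUE_ITR_INTERVAL_MAX;
		else if (is_pf)
			interval = QUEUE_ITR_INTERVAL_DEFAULT;
		else
			interval = VF_QUEUE_ITR_INTERVAL_DEFAULT;

		/* Each hardware count represents 2 us. */
		return interval / 2;
	}

	int
	main(void)
	{
		printf("PF: %u, VF: %u, multi-driver: %u\n",
		       (unsigned)calc_itr_interval(true, false),
		       (unsigned)calc_itr_interval(false, false),
		       (unsigned)calc_itr_interval(false, true));
		return 0;
	}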
-static inline void
-i40e_global_cfg_warning(enum I40E_WARNING_IDX idx)
-{
- const char *warning;
- static const char *const warning_list[] = {
- [I40E_WARNING_DIS_FLX_PLD] = "disable FDIR flexible payload",
- [I40E_WARNING_ENA_FLX_PLD] = "enable FDIR flexible payload",
- [I40E_WARNING_QINQ_PARSER] = "support QinQ parser",
- [I40E_WARNING_QINQ_CLOUD_FILTER] = "support QinQ cloud filter",
- [I40E_WARNING_TPID] = "support TPID configuration",
- [I40E_WARNING_FLOW_CTL] = "configure water marker",
- [I40E_WARNING_GRE_KEY_LEN] = "support GRE key length setting",
- [I40E_WARNING_QF_CTL] = "support hash function setting",
- [I40E_WARNING_HASH_INSET] = "configure hash input set",
- [I40E_WARNING_HSYM] = "set symmetric hash",
- [I40E_WARNING_HASH_MSK] = "configure hash mask",
- [I40E_WARNING_FD_MSK] = "configure fdir mask",
- [I40E_WARNING_RPL_CLD_FILTER] = "replace cloud filter",
- };
-
- warning = warning_list[idx];
-
- RTE_LOG(WARNING, PMD,
- "Global register is changed during %s\n",
- warning);
-}
-
#define I40E_VALID_FLOW(flow_type) \
((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
(flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 804e4453..001c301b 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -44,6 +44,8 @@
#define I40EVF_BUSY_WAIT_COUNT 50
#define MAX_RESET_WAIT_CNT 20
+#define I40EVF_ALARM_INTERVAL 50000 /* us */
+
struct i40evf_arq_msg_info {
enum virtchnl_ops ops;
enum i40e_status_code result;
@@ -1133,7 +1135,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+ i40e_calc_itr_interval(0, 0);
vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
vf->dev_data = dev->data;
@@ -1370,7 +1372,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
* void
*/
static void
-i40evf_dev_interrupt_handler(void *param)
+i40evf_dev_alarm_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1399,6 +1401,8 @@ i40evf_dev_interrupt_handler(void *param)
done:
i40evf_enable_irq0(hw);
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, dev);
}
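
Note: the VF driver now polls the admin queue from an EAL alarm that re-arms itself, instead of a device interrupt callback. A minimal sketch of that self-rearming pattern using the public rte_eal_alarm_set()/rte_eal_alarm_cancel() API follows; it assumes a DPDK build environment and elides error handling.

	/* Minimal self-rearming alarm sketch; needs DPDK (link -lrte_eal). */
	#include <stdio.h>
	#include <unistd.h>

	#include <rte_eal.h>
	#include <rte_alarm.h>

	#define ALARM_INTERVAL_US 50000 /* 50 ms, the cadence used by the VF driver */

	static void
	poll_handler(void *arg)
	{
		int *ticks = arg;

		(*ticks)++; /* stand-in for draining the admin queue */
		/* Re-arm: the callback schedules itself again, giving a periodic
		 * poll without any device interrupt. */
		rte_eal_alarm_set(ALARM_INTERVAL_US, poll_handler, arg);
	}

	int
	main(int argc, char **argv)
	{
		static int ticks;

		if (rte_eal_init(argc, argv) < 0)
			return 1;

		rte_eal_alarm_set(ALARM_INTERVAL_US, poll_handler, &ticks);
		usleep(10 * ALARM_INTERVAL_US);
		/* Cancel before teardown, as i40evf_dev_close() does. */
		rte_eal_alarm_cancel(poll_handler, &ticks);
		printf("handler ran %d times\n", ticks);
		return 0;
	}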
static int
@@ -1442,12 +1446,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
return -1;
}
- /* register callback func to eal lib */
- rte_intr_callback_register(&pci_dev->intr_handle,
- i40evf_dev_interrupt_handler, (void *)eth_dev);
-
- /* enable uio intr after callback register */
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, eth_dev);
/* configure and enable device interrupt */
i40evf_enable_irq0(hw);
@@ -1536,7 +1536,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+ if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1583,37 +1583,35 @@ static int
i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_rx_queue *rxq;
- int err = 0;
+ int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
- err = i40e_alloc_rx_queue_mbufs(rxq);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
- return err;
- }
-
- rte_wmb();
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
- /* Init the RX tail register. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- I40EVF_WRITE_FLUSH(hw);
+ rte_wmb();
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
- rx_queue_id);
- else
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ return err;
}
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -1622,45 +1620,39 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct i40e_rx_queue *rxq;
int err;
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+ rxq = dev->data->rx_queues[rx_queue_id];
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
- rx_queue_id);
- return err;
- }
-
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
}
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
static int
i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- int err = 0;
+ int err;
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
-
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
-
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
- tx_queue_id);
- else
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -1669,22 +1661,19 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
struct i40e_tx_queue *txq;
int err;
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
-
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
- tx_queue_id);
- return err;
- }
+ txq = dev->data->tx_queues[tx_queue_id];
- i40e_tx_queue_release_mbufs(txq);
- i40e_reset_tx_queue(txq);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
}
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
@@ -1836,7 +1825,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+ i40e_calc_itr_interval(0, 0);
uint16_t msix_intr;
msix_intr = intr_handle->intr_vec[queue_id];
@@ -1859,8 +1848,6 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
I40EVF_WRITE_FLUSH(hw);
- rte_intr_enable(&pci_dev->intr_handle);
-
return 0;
}
@@ -2016,17 +2003,9 @@ i40evf_dev_start(struct rte_eth_dev *dev)
goto err_mac;
}
- /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
- * is mapped to VFIO vector 0 in i40evf_dev_init( ).
- * If previous VFIO interrupt mapping set in i40evf_dev_init( ) is
- * not cleared, it will fail when rte_intr_enable( ) tries to map Rx
- * queue interrupt to other VFIO vectors.
- * So clear uio/vfio intr/evevnfd first to avoid failure.
- */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
- rte_intr_disable(intr_handle);
+ /* only enable interrupt in rx interrupt mode */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
rte_intr_enable(intr_handle);
- }
i40evf_enable_queues_intr(dev);
@@ -2050,6 +2029,9 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ rte_intr_disable(intr_handle);
+
if (hw->adapter_stopped == 1)
return;
i40evf_stop_queues(dev);
@@ -2182,7 +2164,6 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- memset(dev_info, 0, sizeof(*dev_info));
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2200,6 +2181,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER;
@@ -2285,9 +2267,8 @@ static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
i40evf_dev_stop(dev);
i40e_dev_free_queues(dev);
/*
@@ -2300,12 +2281,6 @@ i40evf_dev_close(struct rte_eth_dev *dev)
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
- /* disable uio intr before callback unregister */
- rte_intr_disable(intr_handle);
-
- /* unregister callback func from eal lib */
- rte_intr_callback_unregister(intr_handle,
- i40evf_dev_interrupt_handler, dev);
i40evf_disable_irq0(hw);
}
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index a4a61d1c..d41601a1 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -526,7 +526,6 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf,
(num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
(layer_idx * I40E_MAX_FLXPLD_FIED);
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
- i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
}
for (i = 0; i < num; i++) {
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 89de6a59..c67b264d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -2263,7 +2263,6 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
(raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
(layer_idx * I40E_MAX_FLXPLD_FIED);
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
- i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
}
/* Set flex pit */
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 6032d554..2a28ee34 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1439,13 +1439,15 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
/* Check for m->nb_segs to not exceed the limits. */
if (!(ol_flags & PKT_TX_TCP_SEG)) {
- if (m->nb_segs > I40E_TX_MAX_SEG ||
- m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
+ m->pkt_len > I40E_FRAME_SIZE_MAX) {
rte_errno = -EINVAL;
return i;
}
- } else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
- (m->tso_segsz > I40E_MAX_TSO_MSS)) {
+ } else if (m->nb_segs > I40E_TX_MAX_SEG ||
+ m->tso_segsz < I40E_MIN_TSO_MSS ||
+ m->tso_segsz > I40E_MAX_TSO_MSS ||
+ m->pkt_len > I40E_TSO_FRAME_SIZE_MAX) {
/* MSS outside the range (256B - 9674B) are considered
* malicious
*/
@@ -1458,6 +1460,12 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+ /* Check the minimum packet size. */
+ if (m->pkt_len < I40E_TX_MIN_PKT_LEN) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
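
Note: the reshuffled Tx prep checks above bound non-TSO packets by I40E_FRAME_SIZE_MAX, bound TSO packets by the new I40E_TSO_FRAME_SIZE_MAX, and reject runts below I40E_TX_MIN_PKT_LEN. A hedged sketch of the resulting decision logic follows, with the mbuf replaced by a plain struct; the limits are copied from the diff.

	/* Hedged sketch of the reordered Tx prep checks. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TX_MAX_SEG      255    /* UINT8_MAX */
	#define TX_MAX_MTU_SEG  8
	#define FRAME_SIZE_MAX  9728
	#define TSO_FRAME_MAX   262144
	#define MIN_TSO_MSS     256
	#define MAX_TSO_MSS     9674
	#define TX_MIN_PKT_LEN  17

	struct pkt {
		bool tso;
		uint16_t nb_segs;
		uint16_t tso_segsz;
		uint32_t pkt_len;
	};

	static bool
	tx_pkt_ok(const struct pkt *m)
	{
		if (!m->tso) {
			/* Non-TSO: few segments, frame within normal bounds. */
			if (m->nb_segs > TX_MAX_MTU_SEG || m->pkt_len > FRAME_SIZE_MAX)
				return false;
		} else if (m->nb_segs > TX_MAX_SEG ||
			   m->tso_segsz < MIN_TSO_MSS ||
			   m->tso_segsz > MAX_TSO_MSS ||
			   m->pkt_len > TSO_FRAME_MAX) {
			/* TSO: MSS outside 256B-9674B or oversized frame. */
			return false;
		}
		/* Runt check added by the patch. */
		return m->pkt_len >= TX_MIN_PKT_LEN;
	}

	int
	main(void)
	{
		struct pkt runt = { .tso = false, .nb_segs = 1, .pkt_len = 16 };
		struct pkt tso  = { .tso = true, .nb_segs = 32,
				    .tso_segsz = 1460, .pkt_len = 64 * 1024 };

		printf("runt ok: %d, tso ok: %d\n", tx_pkt_ok(&runt), tx_pkt_ok(&tso));
		return 0;
	}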
@@ -1524,38 +1532,36 @@ int
i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_rx_queue *rxq;
- int err = -1;
+ int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40e_alloc_rx_queue_mbufs(rxq);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
- rte_wmb();
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
- /* Init the RX tail regieter. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ rte_wmb();
- err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
- rx_queue_id);
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- } else
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ return err;
}
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
int
@@ -1565,24 +1571,21 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
- /*
- * rx_queue_id is queue id application refers to, while
- * rxq->reg_idx is the real queue index.
- */
- err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
- rx_queue_id);
- return err;
- }
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /*
+ * rx_queue_id is the queue id the application refers to, while
+ * rxq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
}
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1590,28 +1593,27 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- int err = -1;
+ int err;
struct i40e_tx_queue *txq;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
- /*
- * tx_queue_id is queue id application refers to, while
- * rxq->reg_idx is the real queue index.
- */
- err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
- tx_queue_id);
- else
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /*
+ * tx_queue_id is the queue id the application refers to, while
+ * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
int
@@ -1621,26 +1623,23 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
- /*
- * tx_queue_id is queue id application refers to, while
- * txq->reg_idx is the real queue index.
- */
- err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
- tx_queue_id);
- return err;
- }
-
- i40e_tx_queue_release_mbufs(txq);
- i40e_reset_tx_queue(txq);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /*
+ * tx_queue_id is the queue id the application refers to, while
+ * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
+ tx_queue_id);
+ return err;
}
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
@@ -1829,8 +1828,10 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
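
Note: the CRC handling above now keys off rte_eth_dev_must_keep_crc() rather than testing DEV_RX_OFFLOAD_CRC_STRIP directly. A simplified stand-in sketch follows; the flag values are illustrative, and the real helper's precedence between CRC_STRIP and KEEP_CRC is defined in rte_ethdev.h.

	/* Simplified stand-in for the KEEP_CRC decision. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define OFFLOAD_CRC_STRIP (1u << 0)
	#define OFFLOAD_KEEP_CRC  (1u << 1)
	#define ETHER_CRC_LEN 4

	static bool
	must_keep_crc(uint64_t rx_offloads)
	{
		/* Keep the CRC only when the application explicitly asked for
		 * it and did not also request stripping. */
		if (rx_offloads & OFFLOAD_CRC_STRIP)
			return false;
		return (rx_offloads & OFFLOAD_KEEP_CRC) != 0;
	}

	int
	main(void)
	{
		uint8_t crc_len = must_keep_crc(OFFLOAD_KEEP_CRC) ? ETHER_CRC_LEN : 0;

		printf("crc_len = %u\n", crc_len);
		return 0;
	}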
@@ -2087,7 +2088,7 @@ i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
}
/* check simple tx conflict */
if (ad->tx_simple_allowed) {
- if (txq->offloads != 0 ||
+ if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
PMD_DRV_LOG(ERR, "No-simple tx is required.");
return -EINVAL;
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index ea73a8a1..3fc619af 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -30,6 +30,8 @@
#define I40E_TX_MAX_SEG UINT8_MAX
#define I40E_TX_MAX_MTU_SEG 8
+#define I40E_TX_MIN_PKT_LEN 17
+
#undef container_of
#define container_of(ptr, type, member) ({ \
typeof(((type *)0)->member)(*__mptr) = (ptr); \
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
index dbcb61f3..23179b3b 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -188,7 +188,7 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
*/
- while (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
i40e_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index 7aa1a751..bba62b1c 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1709,6 +1709,7 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
else if (is_exist == 3)
PMD_DRV_LOG(ERR, "Profile of different group already exists");
+ i40e_update_customized_info(dev, buff, size, op);
rte_free(profile_info_sec);
return -EEXIST;
}
@@ -3162,8 +3163,6 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
i40e_check_write_global_reg(hw,
I40E_GLQF_HASH_MSK(i, pctype),
mask_reg[i]);
- i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
- i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
break;
case INSET_FDIR:
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
@@ -3175,7 +3174,6 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
i40e_check_write_global_reg(hw,
I40E_GLQF_FD_MSK(i, pctype),
mask_reg[i]);
- i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
break;
case INSET_FDIR_FLX:
i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
diff --git a/drivers/net/ifc/Makefile b/drivers/net/ifc/Makefile
index 1011995b..39b36ae5 100644
--- a/drivers/net/ifc/Makefile
+++ b/drivers/net/ifc/Makefile
@@ -6,7 +6,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
# library name
#
-LIB = librte_ifcvf_vdpa.a
+LIB = librte_pmd_ifc.a
LDLIBS += -lpthread
LDLIBS += -lrte_eal -lrte_pci -lrte_vhost -lrte_bus_pci
@@ -22,14 +22,14 @@ BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.
VPATH += $(SRCDIR)/base
-EXPORT_MAP := rte_ifcvf_version.map
+EXPORT_MAP := rte_pmd_ifc_version.map
LIBABIVER := 1
#
# all source are stored in SRCS-y
#
-SRCS-$(CONFIG_RTE_LIBRTE_IFCVF_VDPA_PMD) += ifcvf_vdpa.c
-SRCS-$(CONFIG_RTE_LIBRTE_IFCVF_VDPA_PMD) += ifcvf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifcvf_vdpa.c
+SRCS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifcvf.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ifc/ifcvf_vdpa.c b/drivers/net/ifc/ifcvf_vdpa.c
index c6627c23..88d81403 100644
--- a/drivers/net/ifc/ifcvf_vdpa.c
+++ b/drivers/net/ifc/ifcvf_vdpa.c
@@ -646,6 +646,9 @@ ifcvf_get_vdpa_features(int did, uint64_t *features)
#define VDPA_SUPPORTED_PROTOCOL_FEATURES \
(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
+ 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
+ 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
+ 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD)
static int
ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features)
@@ -782,9 +785,7 @@ RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");
-RTE_INIT(ifcvf_vdpa_init_log);
-static void
-ifcvf_vdpa_init_log(void)
+RTE_INIT(ifcvf_vdpa_init_log)
{
ifcvf_vdpa_logtype = rte_log_register("pmd.net.ifcvf_vdpa");
if (ifcvf_vdpa_logtype >= 0)
diff --git a/drivers/net/ifc/meson.build b/drivers/net/ifc/meson.build
new file mode 100644
index 00000000..72df070a
--- /dev/null
+++ b/drivers/net/ifc/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+build = dpdk_conf.has('RTE_LIBRTE_VHOST')
+allow_experimental_apis = true
+sources = files('ifcvf_vdpa.c', 'base/ifcvf.c')
+includes += include_directories('base')
+deps += 'vhost'
diff --git a/drivers/net/ifc/rte_ifcvf_version.map b/drivers/net/ifc/rte_pmd_ifc_version.map
index 9b9ab1a4..9b9ab1a4 100644
--- a/drivers/net/ifc/rte_ifcvf_version.map
+++ b/drivers/net/ifc/rte_pmd_ifc_version.map
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 87d2ad09..26b19273 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -3120,9 +3120,18 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
}
/* Flow Director Stats registers */
- hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
- hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
+ IXGBE_FDIRUSTAT) & 0xFFFF;
+ hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
+ IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
+ hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
+ IXGBE_FDIRFSTAT) & 0xFFFF;
+ hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
+ IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
+ }
/* MACsec Stats registers */
macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
macsec_stats->out_pkts_encrypted +=
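
Note: the FDIRUSTAT/FDIRFSTAT registers read above pack two 16-bit counters (added/removed filters) into one 32-bit word. A tiny sketch of the split follows, with a made-up register value.

	/* Splitting one 32-bit stats register into its two 16-bit counters. */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint32_t fdirustat = (7u << 16) | 42u; /* removed=7, added=42 */
		uint32_t added   = fdirustat & 0xFFFF;
		uint32_t removed = (fdirustat >> 16) & 0xFFFF;

		printf("added=%"PRIu32" removed=%"PRIu32"\n", added, removed);
		return 0;
	}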
@@ -3755,6 +3764,14 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
dev_info->speed_capa |= ETH_LINK_SPEED_5G;
}
+
+ /* Driver-preferred Rx/Tx parameters */
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
}
static const uint32_t *
@@ -4991,12 +5008,12 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+ if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
#else
- if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+ if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
}
@@ -8567,9 +8584,7 @@ RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
-RTE_INIT(ixgbe_init_log);
-static void
-ixgbe_init_log(void)
+RTE_INIT(ixgbe_init_log)
{
ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
if (ixgbe_logtype_init >= 0)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index e42ec30d..d0b93968 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -100,6 +100,11 @@
#define IXGBE_5TUPLE_MAX_PRI 7
#define IXGBE_5TUPLE_MIN_PRI 1
+/* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros */
+#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000
+/* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros */
+#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0
+
#define IXGBE_RSS_OFFLOAD_ALL ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index d5e51797..e559f0fa 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -394,9 +394,12 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
IXGBE_FDIRIP6M_TNI_VNI;
if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
- mac_mask = info->mask.mac_addr_byte_mask;
- fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
- & IXGBE_FDIRIP6M_INNER_MAC;
+ fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
+ mac_mask = info->mask.mac_addr_byte_mask &
+ (IXGBE_FDIRIP6M_INNER_MAC >>
+ IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
+ fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
+ IXGBE_FDIRIP6M_INNER_MAC);
switch (info->mask.tunnel_type_mask) {
case 0:
@@ -771,10 +774,19 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
input->formatted.inner_mac,
fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
sizeof(input->formatted.inner_mac));
- input->formatted.tunnel_type =
- fdir_filter->input.flow.tunnel_flow.tunnel_type;
+ if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_VXLAN)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
+ else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
+ else
+ PMD_DRV_LOG(ERR, " invalid tunnel type arguments.");
+
input->formatted.tni_vni =
- fdir_filter->input.flow.tunnel_flow.tunnel_id;
+ fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
}
return 0;
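
Note: the hunk above changes the VNI handling so the 32-bit tunnel_id, which carries the 24-bit VXLAN VNI in its upper three bytes, is shifted right by 8 when building the filter input instead of shifting the stored field later. An illustration follows; the sample tunnel_id is made up.

	/* Illustration of the VNI byte handling. */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t
	tunnel_id_to_tni_vni(uint32_t tunnel_id)
	{
		return tunnel_id >> 8; /* drop the low byte, keep the 24-bit VNI */
	}

	int
	main(void)
	{
		uint32_t tunnel_id = 0xabcdefu << 8; /* VNI 0xabcdef */

		printf("tni_vni = 0x%06"PRIx32"\n", tunnel_id_to_tni_vni(tunnel_id));
		return 0;
	}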
@@ -1001,8 +1013,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
} else {
/* tunnel mode */
- if (input->formatted.tunnel_type !=
- RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ if (input->formatted.tunnel_type)
tunnel_type = 0x80000000;
tunnel_type |= addr_high;
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
@@ -1010,6 +1021,9 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
input->formatted.tni_vni);
}
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
}
/* record vlan (little-endian) and flex_bytes(big-endian) */
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index eb0644c8..1adf1b80 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1739,7 +1739,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
} else {
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2437,7 +2438,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* Get the VxLAN info */
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_VXLAN;
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
/* Only care about VNI, others should be masked. */
if (!item->mask) {
@@ -2487,17 +2488,15 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
rule->b_spec = TRUE;
vxlan_spec = item->spec;
rte_memcpy(((uint8_t *)
- &rule->ixgbe_fdir.formatted.tni_vni + 1),
+ &rule->ixgbe_fdir.formatted.tni_vni),
vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
- rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
- rule->ixgbe_fdir.formatted.tni_vni);
}
}
/* Get the NVGRE info */
if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_NVGRE;
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
/**
* Only care about flags0, flags1, protocol and TNI,
@@ -2587,7 +2586,6 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* tni is a 24-bits bit field */
rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
- rule->ixgbe_fdir.formatted.tni_vni <<= 8;
}
}
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index de7ed367..08405f1e 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -609,7 +609,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
}
- if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+ if (rte_eth_dev_must_keep_crc(rx_offloads)) {
PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
return -1;
}
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 4d199c80..4b833ffa 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -128,21 +128,23 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
- vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
-
RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
- ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
- if (ret)
- PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
-
vf_num = dev_num_vf(eth_dev);
if (vf_num == 0)
return;
+ vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+ if (*vfinfo == NULL)
+ return;
+
+ ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
rte_free(*vfinfo);
*vfinfo = NULL;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 3e13d26a..f82b74a9 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1420,7 +1420,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
/*
* Check if VLAN present only.
* Do not check whether L3/L4 rx checksum done by NIC or not,
- * That can be found from rte_eth_rxmode.hw_ip_checksum flag
+ * That can be found from rte_eth_rxmode.offloads flag
*/
pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
@@ -2849,6 +2849,7 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER;
@@ -2935,8 +2936,10 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->offloads = offloads;
@@ -4702,7 +4705,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
- if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads) &&
(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
/*
* According to chapter of 4.6.7.2.1 of the Spec Rev.
@@ -4851,10 +4854,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
- hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
- else
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
+ else
+ hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
/*
* Configure jumbo frame support, if any.
@@ -4892,8 +4895,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ?
- 0 : ETHER_CRC_LEN;
+ rxq->crc_len = rte_eth_dev_must_keep_crc(rx_conf->offloads) ?
+ ETHER_CRC_LEN : 0;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
@@ -4962,10 +4965,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP)
- rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
- else
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
+ else
+ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
@@ -5173,34 +5176,30 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- /* Allocate buffers for descriptor rings */
- if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
- PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
- rx_queue_id);
- return -1;
- }
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- rxdctl |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+ rxq = dev->data->rx_queues[rx_queue_id];
- /* Wait until RX Enable ready */
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
- rx_queue_id);
- rte_wmb();
- IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
+ /* Allocate buffers for descriptor rings */
+ if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+ rx_queue_id);
return -1;
+ }
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
@@ -5221,30 +5220,26 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
- /* Wait until RX Enable bit clear */
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
- rx_queue_id);
+ /* Wait until RX Enable bit clear */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
- rte_delay_us(RTE_IXGBE_WAIT_100_US);
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
- ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(adapter, rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- } else
- return -1;
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
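The removed per-driver bounds checks are not lost: the ethdev layer now rejects an out-of-range queue index before the PMD callback ever runs. Roughly, paraphrasing the generic layer of this period (signature is real, body condensed):

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}
	/* ... state checks, then the driver callback with a valid id ... */
	return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

The same reasoning applies to the rx_queue_start and tx_queue_start/stop hunks that follow.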
@@ -5264,30 +5259,27 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+ txq = dev->data->tx_queues[tx_queue_id];
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
- /* Wait until TX Enable ready */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- txdctl = IXGBE_READ_REG(hw,
- IXGBE_TXDCTL(txq->reg_idx));
- } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable "
- "Tx Queue %d", tx_queue_id);
- }
- rte_wmb();
- IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
- return -1;
+ /* Wait until TX Enable ready */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
+ IXGBE_TXDCTL(txq->reg_idx));
+ } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+ tx_queue_id);
+ }
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
@@ -5307,9 +5299,6 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id >= dev->data->nb_tx_queues)
- return -1;
-
txq = dev->data->tx_queues[tx_queue_id];
/* Wait until TX queue is empty */
@@ -5323,8 +5312,9 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IXGBE_TDT(txq->reg_idx));
} while (--poll_ms && (txtdh != txtdt));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
- "when stopping.", tx_queue_id);
+ PMD_INIT_LOG(ERR,
+ "Tx Queue %d is not empty when stopping.",
+ tx_queue_id);
}
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
@@ -5340,8 +5330,8 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IXGBE_TXDCTL(txq->reg_idx));
} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable "
- "Tx Queue %d", tx_queue_id);
+ PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+ tx_queue_id);
}
if (txq->ops != NULL) {
diff --git a/drivers/net/kni/meson.build b/drivers/net/kni/meson.build
new file mode 100644
index 00000000..0f784c6d
--- /dev/null
+++ b/drivers/net/kni/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# this driver can be built if-and-only-if the KNI library is buildable
+build = dpdk_conf.has('RTE_LIBRTE_KNI')
+allow_experimental_apis = true
+sources = files('rte_eth_kni.c')
+deps += 'kni'
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index ab63ea42..085bb845 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -207,6 +207,7 @@ eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused,
dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -419,6 +420,7 @@ eth_kni_probe(struct rte_vdev_device *vdev)
}
/* TODO: request info from primary to set up Rx and Tx */
eth_dev->dev_ops = &eth_kni_ops;
+ eth_dev->device = &vdev->device;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -487,9 +489,7 @@ static struct rte_vdev_driver eth_kni_drv = {
RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "=<int>");
-RTE_INIT(eth_kni_init_log);
-static void
-eth_kni_init_log(void)
+RTE_INIT(eth_kni_init_log)
{
eth_kni_logtype = rte_log_register("pmd.net.kni");
if (eth_kni_logtype >= 0)
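The same RTE_INIT conversion recurs throughout this series. It works because the macro now supplies the constructor function header itself; approximately, paraphrasing the rte_common.h of this era (treat the exact expansion as an assumption):

#define RTE_INIT_PRIO(func, prio) \
static void __attribute__((constructor(RTE_PRIO(prio)), used)) func(void)

#define RTE_INIT(func) \
	RTE_INIT_PRIO(func, LAST)

/* So RTE_INIT(eth_kni_init_log) { ... } both defines the function and
 * registers it as a constructor, making the separate forward declaration
 * and "static void" header redundant. */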
diff --git a/drivers/net/liquidio/Makefile b/drivers/net/liquidio/Makefile
index fc5f18ad..f1092851 100644
--- a/drivers/net/liquidio/Makefile
+++ b/drivers/net/liquidio/Makefile
@@ -15,7 +15,7 @@ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
-EXPORT_MAP := rte_pmd_lio_version.map
+EXPORT_MAP := rte_pmd_liquidio_version.map
LIBABIVER := 1
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index 64b1b86c..93e89007 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -2143,9 +2143,7 @@ RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
-RTE_INIT(lio_init_log);
-static void
-lio_init_log(void)
+RTE_INIT(lio_init_log)
{
lio_logtype_init = rte_log_register("pmd.net.liquidio.init");
if (lio_logtype_init >= 0)
diff --git a/drivers/net/liquidio/meson.build b/drivers/net/liquidio/meson.build
new file mode 100644
index 00000000..9ae48e21
--- /dev/null
+++ b/drivers/net/liquidio/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('base/lio_23xx_vf.c',
+ 'base/lio_mbox.c',
+ 'lio_ethdev.c',
+ 'lio_rxtx.c')
+includes += include_directories('base')
diff --git a/drivers/net/liquidio/rte_pmd_lio_version.map b/drivers/net/liquidio/rte_pmd_liquidio_version.map
index 8591cc0b..8591cc0b 100644
--- a/drivers/net/liquidio/rte_pmd_lio_version.map
+++ b/drivers/net/liquidio/rte_pmd_liquidio_version.map
diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index b7d00a04..9c28ed4d 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -1,10 +1,33 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['af_packet', 'axgbe', 'bonding', 'dpaa', 'dpaa2',
- 'e1000', 'enic', 'fm10k', 'i40e', 'ixgbe',
- 'mvpp2', 'null', 'octeontx', 'pcap', 'ring',
- 'sfc', 'thunderx', 'virtio']
+drivers = ['af_packet',
+ 'ark',
+ 'avp',
+ 'axgbe', 'bonding',
+ 'bnx2x',
+ 'bnxt',
+ 'cxgbe',
+ 'dpaa', 'dpaa2',
+ 'e1000',
+ 'ena',
+ 'enic',
+ 'failsafe',
+ 'fm10k', 'i40e',
+ 'ifc',
+ 'ixgbe',
+ 'kni',
+ 'liquidio',
+ 'mvpp2',
+ 'netvsc',
+ 'nfp',
+ 'null', 'octeontx', 'pcap', 'ring',
+ 'sfc',
+ 'softnic',
+ 'szedata2',
+ 'thunderx',
+ 'vhost',
+ 'virtio']
std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std
std_deps += ['bus_vdev'] # same with vdev bus
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 73f9d405..92e93225 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -85,6 +85,11 @@ mlx4_autoconf.h.new: FORCE
mlx4_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q : > '$@'
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX4_WQE_LSO_SEG \
+ infiniband/mlx4dv.h \
+ type 'struct mlx4_wqe_lso_seg' \
+ $(AUTOCONF_OUTPUT)
# Create mlx4_autoconf.h or update it in case it differs from the new one.
@@ -110,7 +115,7 @@ endif
$(LIB_GLUE): mlx4_glue.o
$Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \
-Wl,-h,$(LIB_GLUE) \
- -s -shared -o $@ $< -libverbs -lmlx4
+ -shared -o $@ $< -libverbs -lmlx4
mlx4_glue.o: mlx4_autoconf.h
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index a29814b3..defc0d4b 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -575,14 +575,14 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_dev = list[i];
DEBUG("device opened");
if (mlx4_glue->query_device(attr_ctx, &device_attr)) {
- rte_errno = ENODEV;
+ err = ENODEV;
goto error;
}
INFO("%u port(s) detected", device_attr.phys_port_cnt);
conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
if (mlx4_args(pci_dev->device.devargs, &conf)) {
ERROR("failed to process device arguments");
- rte_errno = EINVAL;
+ err = EINVAL;
goto error;
}
/* Use all ports when none are defined */
@@ -590,7 +590,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
conf.ports.enabled = conf.ports.present;
/* Retrieve extended device attributes. */
if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
- rte_errno = ENODEV;
+ err = ENODEV;
goto error;
}
assert(device_attr.max_sge >= MLX4_MAX_SGE);
@@ -609,18 +609,18 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
DEBUG("using port %u", port);
ctx = mlx4_glue->open_device(ibv_dev);
if (ctx == NULL) {
- rte_errno = ENODEV;
+ err = ENODEV;
goto port_error;
}
/* Check port status. */
err = mlx4_glue->query_port(ctx, port, &port_attr);
if (err) {
- rte_errno = err;
- ERROR("port query failed: %s", strerror(rte_errno));
+ err = ENODEV;
+ ERROR("port query failed: %s", strerror(err));
goto port_error;
}
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
- rte_errno = ENOTSUP;
+ err = ENOTSUP;
ERROR("port %d is not configured in Ethernet mode",
port);
goto port_error;
@@ -630,15 +630,16 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
port, mlx4_glue->port_state_str(port_attr.state),
port_attr.state);
/* Make asynchronous FD non-blocking to handle interrupts. */
- if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
+ err = mlx4_fd_set_non_blocking(ctx->async_fd);
+ if (err) {
ERROR("cannot make asynchronous FD non-blocking: %s",
- strerror(rte_errno));
+ strerror(err));
goto port_error;
}
/* Allocate protection domain. */
pd = mlx4_glue->alloc_pd(ctx);
if (pd == NULL) {
- rte_errno = ENOMEM;
+ err = ENOMEM;
ERROR("PD allocation failure");
goto port_error;
}
@@ -647,7 +648,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
sizeof(*priv),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- rte_errno = ENOMEM;
+ err = ENOMEM;
ERROR("priv allocation failure");
goto port_error;
}
@@ -672,14 +673,27 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
&device_attr_ex);
DEBUG("supported RSS hash fields mask: %016" PRIx64,
priv->hw_rss_sup);
+ priv->hw_rss_max_qps =
+ device_attr_ex.rss_caps.max_rwq_indirection_table_size;
+ DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps);
priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
IBV_RAW_PACKET_CAP_SCATTER_FCS);
DEBUG("FCS stripping toggling is %ssupported",
priv->hw_fcs_strip ? "" : "not ");
+ priv->tso =
+ ((device_attr_ex.tso_caps.max_tso > 0) &&
+ (device_attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
+ if (priv->tso)
+ priv->tso_max_payload_sz =
+ device_attr_ex.tso_caps.max_tso;
+ DEBUG("TSO is %ssupported",
+ priv->tso ? "" : "not ");
/* Configure the first MAC address by default. */
- if (mlx4_get_mac(priv, &mac.addr_bytes)) {
+ err = mlx4_get_mac(priv, &mac.addr_bytes);
+ if (err) {
ERROR("cannot get MAC address, is mlx4_en loaded?"
- " (rte_errno: %s)", strerror(rte_errno));
+ " (error: %s)", strerror(err));
goto port_error;
}
INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
@@ -712,8 +726,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev = rte_eth_dev_allocate(name);
}
if (eth_dev == NULL) {
+ err = ENOMEM;
ERROR("can not allocate rte ethdev");
- rte_errno = ENOMEM;
goto port_error;
}
eth_dev->data->dev_private = priv;
@@ -776,8 +790,6 @@ port_error:
rte_eth_dev_release_port(eth_dev);
break;
}
- if (i == device_attr.phys_port_cnt)
- return 0;
/*
* XXX if something went wrong in the loop above, there is a resource
* leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
@@ -789,8 +801,9 @@ error:
claim_zero(mlx4_glue->close_device(attr_ctx));
if (list)
mlx4_glue->free_device_list(list);
- assert(rte_errno >= 0);
- return -rte_errno;
+ if (err)
+ rte_errno = err;
+ return -err;
}
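The pattern behind this probe rewrite: errors accumulate in a plain errno-style err variable and rte_errno is assigned exactly once, at the end. A minimal sketch of the control flow (the wrapper and condition names are hypothetical):

static int
probe_step_sketch(void)
{
	int err = 0;

	if (query_failed()) {		/* hypothetical condition */
		err = ENODEV;
		goto error;
	}
	/* ... further steps, each setting err and jumping to error ... */
error:
	/* err stays 0 on the success path, so this doubles as return 0,
	 * which is why the early "return 0" before the error label could
	 * be dropped. */
	if (err)
		rte_errno = err;
	return -err;
}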
static const struct rte_pci_id mlx4_pci_id_map[] = {
@@ -954,9 +967,7 @@ glue_error:
/**
* Driver initialization routine.
*/
-RTE_INIT(rte_mlx4_pmd_init);
-static void
-rte_mlx4_pmd_init(void)
+RTE_INIT(rte_mlx4_pmd_init)
{
/*
* MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions we
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 300cb4d7..e6fb934f 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -47,6 +47,9 @@
/** Interrupt alarm timeout value in microseconds. */
#define MLX4_INTR_ALARM_TIMEOUT 100000
+/* Maximum packet header size (L2+L3+L4) for TSO. */
+#define MLX4_MAX_TSO_HEADER 192
+
/** Port parameter. */
#define MLX4_PMD_PORT_KVARG "port"
@@ -90,6 +93,9 @@ struct priv {
uint32_t hw_csum:1; /**< Checksum offload is supported. */
uint32_t hw_csum_l2tun:1; /**< Checksum support for L2 tunnels. */
uint32_t hw_fcs_strip:1; /**< FCS stripping toggling is supported. */
+ uint32_t tso:1; /**< Transmit segmentation offload is supported. */
+ uint32_t tso_max_payload_sz; /**< Max supported TSO payload size. */
+ uint32_t hw_rss_max_qps; /**< Max Rx Queues supported by RSS. */
uint64_t hw_rss_sup; /**< Supported RSS hash fields (Verbs format). */
struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
diff --git a/drivers/net/mlx4/mlx4_prm.h b/drivers/net/mlx4/mlx4_prm.h
index e15a3c14..aef77ba0 100644
--- a/drivers/net/mlx4/mlx4_prm.h
+++ b/drivers/net/mlx4/mlx4_prm.h
@@ -19,6 +19,7 @@
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
+#include "mlx4_autoconf.h"
/* ConnectX-3 Tx queue basic block. */
#define MLX4_TXBB_SHIFT 6
@@ -40,6 +41,7 @@
/* Work queue element (WQE) flags. */
#define MLX4_WQE_CTRL_IIP_HDR_CSUM (1 << 28)
#define MLX4_WQE_CTRL_IL4_HDR_CSUM (1 << 27)
+#define MLX4_WQE_CTRL_RR (1 << 6)
/* CQE checksum flags. */
enum {
@@ -51,6 +53,7 @@ enum {
};
/* CQE status flags. */
+#define MLX4_CQE_STATUS_IPV6F (1 << 12)
#define MLX4_CQE_STATUS_IPV4 (1 << 22)
#define MLX4_CQE_STATUS_IPV4F (1 << 23)
#define MLX4_CQE_STATUS_IPV6 (1 << 24)
@@ -97,6 +100,19 @@ struct mlx4_cq {
int arm_sn; /**< Rx event counter. */
};
+#ifndef HAVE_IBV_MLX4_WQE_LSO_SEG
+/*
+ * WQE LSO segment structure.
+ * Defined here for backward compatibility with rdma-core v17 and below.
+ * A similar definition is found in infiniband/mlx4dv.h in rdma-core v18
+ * and above.
+ */
+struct mlx4_wqe_lso_seg {
+ rte_be32_t mss_hdr_size;
+ rte_be32_t header[];
+};
+#endif
+
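A layout note: header[] is a flexible array member, so it contributes nothing to sizeof(struct mlx4_wqe_lso_seg); the replicated L2/L3/L4 headers are copied immediately after the mss_hdr_size word. A small illustration (function and variable names are placeholders):

/* sizeof(struct mlx4_wqe_lso_seg) == sizeof(rte_be32_t) == 4 bytes */
static void
lso_seg_fill(struct mlx4_wqe_lso_seg *tseg, const void *hdr,
	     uint16_t hdr_len, uint16_t mss)
{
	/* MSS in the high 16 bits, header length in the low 16 bits. */
	tseg->mss_hdr_size = rte_cpu_to_be_32(((uint32_t)mss << 16) | hdr_len);
	memcpy(tseg->header, hdr, hdr_len);
}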
/**
* Retrieve a CQE entry from a CQ.
*
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 87688c1c..9737da2e 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -338,6 +338,12 @@ mlx4_rss_init(struct priv *priv)
if (priv->rss_init)
return 0;
+ if (priv->dev->data->nb_rx_queues > priv->hw_rss_max_qps) {
+ ERROR("RSS does not support more than %d queues",
+ priv->hw_rss_max_qps);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
/* Prepare range for RSS contexts before creating the first WQ. */
ret = mlx4_glue->dv_set_context_attr
(priv->ctx,
@@ -672,7 +678,9 @@ uint64_t
mlx4_get_rx_queue_offloads(struct priv *priv)
{
uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
if (priv->hw_csum)
offloads |= DEV_RX_OFFLOAD_CHECKSUM;
@@ -771,16 +779,16 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev, idx, desc);
}
/* By default, FCS (CRC) is stripped by hardware. */
- if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
- crc_present = 0;
- } else if (priv->hw_fcs_strip) {
- crc_present = 1;
- } else {
- WARN("%p: CRC stripping has been disabled but will still"
- " be performed by hardware, make sure MLNX_OFED and"
- " firmware are up to date",
- (void *)dev);
- crc_present = 0;
+ crc_present = 0;
+ if (rte_eth_dev_must_keep_crc(offloads)) {
+ if (priv->hw_fcs_strip) {
+ crc_present = 1;
+ } else {
+ WARN("%p: CRC stripping has been disabled but will still"
+ " be performed by hardware, make sure MLNX_OFED and"
+ " firmware are up to date",
+ (void *)dev);
+ }
}
DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
" incoming frames to hide it",
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index a92da66b..8c88effc 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -38,10 +38,29 @@
* DWORD (32-bit) of a TXBB.
*/
struct pv {
- volatile struct mlx4_wqe_data_seg *dseg;
+ union {
+ volatile struct mlx4_wqe_data_seg *dseg;
+ volatile uint32_t *dst;
+ };
uint32_t val;
};
+/** A helper structure for TSO packet handling. */
+struct tso_info {
+ /** Pointer to the array of saved first DWORD (32-bit) of a TXBB. */
+ struct pv *pv;
+ /** Current entry in the pv array. */
+ int pv_counter;
+ /** Total size of the WQE including padding. */
+ uint32_t wqe_size;
+ /** Size of TSO header to prepend to each packet to send. */
+ uint16_t tso_header_size;
+ /** Total size of the TSO segment in the WQE. */
+ uint16_t wqe_tso_seg_size;
+ /** Raw WQE size in units of 16 Bytes and without padding. */
+ uint8_t fence_size;
+};
+
/** A table to translate Rx completion flags to packet type. */
uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
/*
@@ -52,49 +71,58 @@ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
* bit[4] - MLX4_CQE_STATUS_TCP
* bit[3] - MLX4_CQE_STATUS_IPV4OPT
* bit[2] - MLX4_CQE_STATUS_IPV6
- * bit[1] - MLX4_CQE_STATUS_IPV4F
+ * bit[1] - MLX4_CQE_STATUS_IPF
* bit[0] - MLX4_CQE_STATUS_IPV4
* giving a total of up to 256 entries.
*/
+ /* L2 */
[0x00] = RTE_PTYPE_L2_ETHER,
+ /* L3 */
[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_NONFRAG,
[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_FRAG,
[0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_FRAG,
- [0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
- [0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT,
+ [0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [0x08] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_NONFRAG,
+ [0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_NONFRAG,
[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L4_FRAG,
+ [0x0b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_FRAG,
+ /* TCP */
[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP,
- [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
[0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP,
+ [0x16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
[0x18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L4_TCP,
[0x19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L4_TCP,
- [0x1a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
- RTE_PTYPE_L4_TCP,
+ /* UDP */
[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP,
- [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
[0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP,
+ [0x26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
[0x28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L4_UDP,
[0x29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L4_UDP,
- [0x2a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
- RTE_PTYPE_L4_UDP,
/* Tunneled - L3 IPV6 */
[0x80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG,
@@ -102,65 +130,58 @@ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG,
[0x84] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0x88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x8b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_FRAG,
/* Tunneled - L3 IPV6, TCP */
[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP,
- [0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
- [0x93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
[0x94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP,
+ [0x96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0x98] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT |
- RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
[0x99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT |
- RTE_PTYPE_INNER_L4_TCP,
- [0x9a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
/* Tunneled - L3 IPV6, UDP */
- [0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ [0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP,
- [0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_UDP,
- [0xa3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_UDP,
- [0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ [0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP,
- [0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ [0xa6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_UDP,
- [0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ [0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_UDP,
- [0xaa] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_UDP,
/* Tunneled - L3 IPV4 */
[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG,
@@ -168,65 +189,54 @@ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG,
[0xc4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0xc8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_FRAG,
+ [0xcb] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_FRAG,
/* Tunneled - L3 IPV4, TCP */
- [0xd0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP,
- [0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
- [0xd3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
[0xd4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP,
+ [0xd6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0xd8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_TCP,
[0xd9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_TCP,
- [0xda] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
/* Tunneled - L3 IPV4, UDP */
- [0xe0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP,
- [0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_UDP,
- [0xe3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_UDP,
[0xe4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP,
+ [0xe6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0xe8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
[0xe9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
- [0xea] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_UDP,
};
@@ -377,6 +387,342 @@ mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg,
}
/**
+ * Obtain and calculate TSO information needed for assembling a TSO WQE.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param tinfo
+ * Pointer to a structure to fill the info with.
+ *
+ * @return
+ * 0 on success, negative value upon error.
+ */
+static inline int
+mlx4_tx_burst_tso_get_params(struct rte_mbuf *buf,
+ struct txq *txq,
+ struct tso_info *tinfo)
+{
+ struct mlx4_sq *sq = &txq->msq;
+ const uint8_t tunneled = txq->priv->hw_csum_l2tun &&
+ (buf->ol_flags & PKT_TX_TUNNEL_MASK);
+
+ tinfo->tso_header_size = buf->l2_len + buf->l3_len + buf->l4_len;
+ if (tunneled)
+ tinfo->tso_header_size +=
+ buf->outer_l2_len + buf->outer_l3_len;
+ if (unlikely(buf->tso_segsz == 0 ||
+ tinfo->tso_header_size == 0 ||
+ tinfo->tso_header_size > MLX4_MAX_TSO_HEADER ||
+ tinfo->tso_header_size > buf->data_len))
+ return -EINVAL;
+ /*
+ * Calculate the WQE TSO segment size
+ * Note:
+ * 1. An LSO segment must be padded such that the subsequent data
+ * segment is 16-byte aligned.
+ * 2. The start address of the TSO segment is always 16-byte aligned.
+ */
+ tinfo->wqe_tso_seg_size = RTE_ALIGN(sizeof(struct mlx4_wqe_lso_seg) +
+ tinfo->tso_header_size,
+ sizeof(struct mlx4_wqe_data_seg));
+ tinfo->fence_size = ((sizeof(struct mlx4_wqe_ctrl_seg) +
+ tinfo->wqe_tso_seg_size) >> MLX4_SEG_SHIFT) +
+ buf->nb_segs;
+ tinfo->wqe_size =
+ RTE_ALIGN((uint32_t)(tinfo->fence_size << MLX4_SEG_SHIFT),
+ MLX4_TXBB_SIZE);
+ /* Validate WQE size and WQE space in the send queue. */
+ if (sq->remain_size < tinfo->wqe_size ||
+ tinfo->wqe_size > MLX4_MAX_WQE_SIZE)
+ return -ENOMEM;
+ /* Init pv. */
+ tinfo->pv = (struct pv *)txq->bounce_buf;
+ tinfo->pv_counter = 0;
+ return 0;
+}
+
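A worked example of the computations above, assuming a single-segment TCP/IPv4 packet with l2+l3+l4 = 14+20+20 = 54 header bytes, 16-byte control/data segments, 64-byte TXBBs, and MLX4_SEG_SHIFT == 4 (the 16-byte unit shift; an assumption here):

/* tso_header_size  = 54
 * wqe_tso_seg_size = RTE_ALIGN(4 + 54, 16)              = 64 bytes
 * fence_size       = ((16 + 64) >> 4) + nb_segs(1)      = 6 (16-byte units)
 * wqe_size         = RTE_ALIGN(6 << 4, MLX4_TXBB_SIZE)
 *                  = RTE_ALIGN(96, 64)                  = 128 bytes
 * i.e. this WQE occupies two TXBBs in the send queue. */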
+/**
+ * Fill the TSO WQE data segments with info on buffers to transmit.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param tinfo
+ * Pointer to TSO info to use.
+ * @param dseg
+ * Pointer to the first data segment in the TSO WQE.
+ * @param ctrl
+ * Pointer to the control segment in the TSO WQE.
+ *
+ * @return
+ * Pointer to the next WQE control segment on success, NULL otherwise.
+ */
+static inline volatile struct mlx4_wqe_ctrl_seg *
+mlx4_tx_burst_fill_tso_dsegs(struct rte_mbuf *buf,
+ struct txq *txq,
+ struct tso_info *tinfo,
+ volatile struct mlx4_wqe_data_seg *dseg,
+ volatile struct mlx4_wqe_ctrl_seg *ctrl)
+{
+ uint32_t lkey;
+ int nb_segs = buf->nb_segs;
+ int nb_segs_txbb;
+ struct mlx4_sq *sq = &txq->msq;
+ struct rte_mbuf *sbuf = buf;
+ struct pv *pv = tinfo->pv;
+ int *pv_counter = &tinfo->pv_counter;
+ volatile struct mlx4_wqe_ctrl_seg *ctrl_next =
+ (volatile struct mlx4_wqe_ctrl_seg *)
+ ((volatile uint8_t *)ctrl + tinfo->wqe_size);
+ uint16_t data_len = sbuf->data_len - tinfo->tso_header_size;
+ uintptr_t data_addr = rte_pktmbuf_mtod_offset(sbuf, uintptr_t,
+ tinfo->tso_header_size);
+
+ do {
+ /* How many dseg entries do we have in the current TXBB? */
+ nb_segs_txbb = (MLX4_TXBB_SIZE -
+ ((uintptr_t)dseg & (MLX4_TXBB_SIZE - 1))) >>
+ MLX4_SEG_SHIFT;
+ switch (nb_segs_txbb) {
+#ifndef NDEBUG
+ default:
+ /* Should never happen. */
+ rte_panic("%p: Invalid number of SGEs(%d) for a TXBB",
+ (void *)txq, nb_segs_txbb);
+ /* rte_panic never returns. */
+ break;
+#endif /* NDEBUG */
+ case 4:
+ /* Memory region key for this memory pool. */
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ dseg->addr = rte_cpu_to_be_64(data_addr);
+ dseg->lkey = lkey;
+ /*
+ * This data segment starts at the beginning of a new
+ * TXBB, so we need to postpone its byte_count writing
+ * for later.
+ */
+ pv[*pv_counter].dseg = dseg;
+ /*
+ * Zero length segment is treated as inline segment
+ * with zero data.
+ */
+ pv[(*pv_counter)++].val =
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000);
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ case 3:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000));
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ case 2:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000));
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ case 1:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000));
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ }
+ /* Wrap dseg if it points at or past the end of the queue. */
+ if ((volatile uint8_t *)dseg >= sq->eob)
+ dseg = (volatile struct mlx4_wqe_data_seg *)
+ ((volatile uint8_t *)dseg - sq->size);
+ } while (true);
+err:
+ return NULL;
+}
+
+/**
+ * Fill the packet's l2, l3 and l4 headers to the WQE.
+ *
+ * This will be used as the header for each TSO segment that is transmitted.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param tinfo
+ * Pointer to TSO info to use.
+ * @param ctrl
+ * Pointer to the control segment in the TSO WQE.
+ *
+ * @return
+ * Pointer to the first data segment of the packet in the TSO WQE.
+ */
+static inline volatile struct mlx4_wqe_data_seg *
+mlx4_tx_burst_fill_tso_hdr(struct rte_mbuf *buf,
+ struct txq *txq,
+ struct tso_info *tinfo,
+ volatile struct mlx4_wqe_ctrl_seg *ctrl)
+{
+ volatile struct mlx4_wqe_lso_seg *tseg =
+ (volatile struct mlx4_wqe_lso_seg *)(ctrl + 1);
+ struct mlx4_sq *sq = &txq->msq;
+ struct pv *pv = tinfo->pv;
+ int *pv_counter = &tinfo->pv_counter;
+ int remain_size = tinfo->tso_header_size;
+ char *from = rte_pktmbuf_mtod(buf, char *);
+ uint16_t txbb_avail_space;
+ /* Union to overcome volatile constraints when copying TSO header. */
+ union {
+ volatile uint8_t *vto;
+ uint8_t *to;
+ } thdr = { .vto = (volatile uint8_t *)tseg->header, };
+
+ /*
+ * TSO data always starts at offset 20 from the beginning of the TXBB
+ * (16-byte ctrl + 4-byte TSO desc). Since each TXBB is 64-byte aligned
+ * we can write the first 44 TSO header bytes without worrying about
+ * TxQ wrapping or overwriting the first 32-bit word of the TXBB.
+ */
+ txbb_avail_space = MLX4_TXBB_SIZE -
+ (sizeof(struct mlx4_wqe_ctrl_seg) +
+ sizeof(struct mlx4_wqe_lso_seg));
+ while (remain_size >= (int)(txbb_avail_space + sizeof(uint32_t))) {
+ /* Copy to end of txbb. */
+ rte_memcpy(thdr.to, from, txbb_avail_space);
+ from += txbb_avail_space;
+ thdr.to += txbb_avail_space;
+ /* New TXBB, check for TxQ wrap. */
+ if (thdr.to >= sq->eob)
+ thdr.vto = sq->buf;
+ /* New TXBB, stash the first 32 bits for later use. */
+ pv[*pv_counter].dst = (volatile uint32_t *)thdr.to;
+ pv[(*pv_counter)++].val = *(uint32_t *)from;
+ from += sizeof(uint32_t);
+ thdr.to += sizeof(uint32_t);
+ remain_size -= txbb_avail_space + sizeof(uint32_t);
+ /* Avail space in new TXBB is TXBB size - 4 */
+ txbb_avail_space = MLX4_TXBB_SIZE - sizeof(uint32_t);
+ }
+ if (remain_size > txbb_avail_space) {
+ rte_memcpy(thdr.to, from, txbb_avail_space);
+ from += txbb_avail_space;
+ thdr.to += txbb_avail_space;
+ remain_size -= txbb_avail_space;
+ /* New TXBB, check for TxQ wrap. */
+ if (thdr.to >= sq->eob)
+ thdr.vto = sq->buf;
+ pv[*pv_counter].dst = (volatile uint32_t *)thdr.to;
+ rte_memcpy(&pv[*pv_counter].val, from, remain_size);
+ (*pv_counter)++;
+ } else if (remain_size) {
+ rte_memcpy(thdr.to, from, remain_size);
+ }
+ tseg->mss_hdr_size = rte_cpu_to_be_32((buf->tso_segsz << 16) |
+ tinfo->tso_header_size);
+ /* Calculate data segment location */
+ return (volatile struct mlx4_wqe_data_seg *)
+ ((uintptr_t)tseg + tinfo->wqe_tso_seg_size);
+}
+
+/**
+ * Write data segments and header for TSO uni/multi segment packet.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param ctrl
+ * Pointer to the WQE control segment.
+ *
+ * @return
+ * Pointer to the next WQE control segment on success, NULL otherwise.
+ */
+static volatile struct mlx4_wqe_ctrl_seg *
+mlx4_tx_burst_tso(struct rte_mbuf *buf, struct txq *txq,
+ volatile struct mlx4_wqe_ctrl_seg *ctrl)
+{
+ volatile struct mlx4_wqe_data_seg *dseg;
+ volatile struct mlx4_wqe_ctrl_seg *ctrl_next;
+ struct mlx4_sq *sq = &txq->msq;
+ struct tso_info tinfo;
+ struct pv *pv;
+ int pv_counter;
+ int ret;
+
+ ret = mlx4_tx_burst_tso_get_params(buf, txq, &tinfo);
+ if (unlikely(ret))
+ goto error;
+ dseg = mlx4_tx_burst_fill_tso_hdr(buf, txq, &tinfo, ctrl);
+ if (unlikely(dseg == NULL))
+ goto error;
+ if ((uintptr_t)dseg >= (uintptr_t)sq->eob)
+ dseg = (volatile struct mlx4_wqe_data_seg *)
+ ((uintptr_t)dseg - sq->size);
+ ctrl_next = mlx4_tx_burst_fill_tso_dsegs(buf, txq, &tinfo, dseg, ctrl);
+ if (unlikely(ctrl_next == NULL))
+ goto error;
+ /* Write the first DWORD of each TXBB saved earlier. */
+ if (likely(tinfo.pv_counter)) {
+ pv = tinfo.pv;
+ pv_counter = tinfo.pv_counter;
+ /* Need a barrier here before writing the first TXBB word. */
+ rte_io_wmb();
+ do {
+ --pv_counter;
+ *pv[pv_counter].dst = pv[pv_counter].val;
+ } while (pv_counter > 0);
+ }
+ ctrl->fence_size = tinfo.fence_size;
+ sq->remain_size -= tinfo.wqe_size;
+ return ctrl_next;
+error:
+ txq->stats.odropped++;
+ return NULL;
+}
+
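The pv array threaded through these helpers exists because the first DWORD of every TXBB doubles as the ownership/stamping word the hardware inspects, so it may only be written once the rest of the WQE is in place. Condensed from the code above (first_dword_of_new_txbb and value_to_write are placeholder names):

/* 1. While building the WQE, stash the word instead of writing it: */
pv[pv_counter].dst = first_dword_of_new_txbb;
pv[pv_counter++].val = value_to_write;
/* 2. When the WQE body is complete, order the earlier stores ... */
rte_io_wmb();
/* 3. ... then publish each TXBB by writing its first DWORD last. */
while (pv_counter > 0) {
	--pv_counter;
	*pv[pv_counter].dst = pv[pv_counter].val;
}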
+/**
* Write data segments of multi-segment packet.
*
* @param buf
@@ -569,6 +915,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t flags16[2];
} srcrb;
uint32_t lkey;
+ bool tso = txq->priv->tso && (buf->ol_flags & PKT_TX_TCP_SEG);
/* Clean up old buffer. */
if (likely(elt->buf != NULL)) {
@@ -587,7 +934,16 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
} while (tmp != NULL);
}
RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
- if (buf->nb_segs == 1) {
+ if (tso) {
+ /* Change opcode to TSO */
+ owner_opcode &= ~MLX4_OPCODE_CONFIG_CMD;
+ owner_opcode |= MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR;
+ ctrl_next = mlx4_tx_burst_tso(buf, txq, ctrl);
+ if (!ctrl_next) {
+ elt->buf = NULL;
+ break;
+ }
+ } else if (buf->nb_segs == 1) {
/* Validate WQE space in the send queue. */
if (sq->remain_size < MLX4_TXBB_SIZE) {
elt->buf = NULL;
@@ -728,11 +1084,13 @@ rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe,
* bit[4] - MLX4_CQE_STATUS_TCP
* bit[3] - MLX4_CQE_STATUS_IPV4OPT
* bit[2] - MLX4_CQE_STATUS_IPV6
- * bit[1] - MLX4_CQE_STATUS_IPV4F
+ * bit[1] - MLX4_CQE_STATUS_IPF
* bit[0] - MLX4_CQE_STATUS_IPV4
* giving a total of up to 256 entries.
*/
idx |= ((status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22);
+ if (status & MLX4_CQE_STATUS_IPV6)
+ idx |= ((status & MLX4_CQE_STATUS_IPV6F) >> 11);
return mlx4_ptype_table[idx];
}
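Folding MLX4_CQE_STATUS_IPV6F (bit 12) down into bit 1 of the index is what makes the new 0x06/0x16/0x26/... table entries reachable. Worked example for an IPv6 fragment:

/* status has IPV6 (bit 24) and IPV6F (bit 12) set: */
idx  = (status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22;	/* bit 24 -> 0x04 */
idx |= (status & MLX4_CQE_STATUS_IPV6F) >> 11;		/* bit 12 -> 0x02 */
/* mlx4_ptype_table[0x06] == RTE_PTYPE_L2_ETHER |
 *	RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG */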
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 4c025e3a..ffa8abfc 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -90,7 +90,7 @@ struct mlx4_txq_stats {
unsigned int idx; /**< Mapping index. */
uint64_t opackets; /**< Total of successfully sent packets. */
uint64_t obytes; /**< Total of successfully sent bytes. */
- uint64_t odropped; /**< Total of packets not sent when Tx ring full. */
+ uint64_t odropped; /**< Total number of packets failed to transmit. */
};
/** Tx queue descriptor. */
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 6edaadbb..9aa7440d 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -116,8 +116,14 @@ mlx4_get_tx_port_offloads(struct priv *priv)
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
}
- if (priv->hw_csum_l2tun)
+ if (priv->tso)
+ offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (priv->hw_csum_l2tun) {
offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (priv->tso)
+ offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ }
return offloads;
}
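From the application side these new capabilities surface through the standard device-info query; a minimal usage sketch (port_id and tx_conf are placeholders):

struct rte_eth_dev_info info;
struct rte_eth_txconf tx_conf = { 0 };

rte_eth_dev_info_get(port_id, &info);
if (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
	tx_conf.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
/* Tunnel TSO is only advertised together with L2 tunnel checksum. */
if (info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
	tx_conf.offloads |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;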
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 8a5229e6..2e70dec5 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -33,6 +33,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl_flow.c
ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)
@@ -56,6 +57,7 @@ LDLIBS += -ldl
else
LDLIBS += -libverbs -lmlx5
endif
+LDLIBS += -lmnl
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
@@ -100,7 +102,7 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q sh -- '$<' '$@' \
HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \
infiniband/mlx5dv.h \
- enum MLX5DV_CONTEXT_MASK_STRIDING_RQ \
+ enum MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_IBV_DEVICE_TUNNEL_SUPPORT \
@@ -150,7 +152,237 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q sh -- '$<' '$@' \
HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \
infiniband/verbs.h \
- enum IBV_FLOW_SPEC_ACTION_COUNT \
+ type 'struct ibv_counter_set_init_attr' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NL_NLDEV \
+ rdma/rdma_netlink.h \
+ enum RDMA_NL_NLDEV \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_CMD_GET \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_CMD_GET \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_CMD_PORT_GET \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_CMD_PORT_GET \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_DEV_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_DEV_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_DEV_NAME \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_DEV_NAME \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_PORT_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_PORT_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_NDEV_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_PHYS_SWITCH_ID \
+ linux/if_link.h \
+ enum IFLA_PHYS_SWITCH_ID \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_PHYS_PORT_NAME \
+ linux/if_link.h \
+ enum IFLA_PHYS_PORT_NAME \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_ACT \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_ACT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_FLAGS \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_FLAGS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_TYPE \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_TYPE \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IP_PROTO \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IP_PROTO \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_VLAN_ID \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_ID \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_VLAN_PRIO \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_PRIO \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_ETH_TYPE \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_ACT_VLAN \
+ linux/tc_act/tc_vlan.h \
+ enum TCA_VLAN_PUSH_VLAN_PRIORITY \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_STATIC_ASSERT \
+ /usr/include/assert.h \
+ define static_assert \
$(AUTOCONF_OUTPUT)
# Create mlx5_autoconf.h or update it in case it differs from the new one.
@@ -177,7 +409,7 @@ endif
$(LIB_GLUE): mlx5_glue.o
$Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \
-Wl,-h,$(LIB_GLUE) \
- -s -shared -o $@ $< -libverbs -lmlx5
+ -shared -o $@ $< -libverbs -lmlx5
mlx5_glue.o: mlx5_autoconf.h
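Each probe above invokes buildtools/auto-config-h.sh, which checks the named header for the given type/enum/define and appends a #define to the generated header when found. The resulting mlx5_autoconf.h looks roughly like this on a recent host (illustrative content only; it varies per system):

/* mlx5_autoconf.h (generated) */
#define HAVE_TCA_FLOWER_ACT 1
#define HAVE_IFLA_PHYS_PORT_NAME 1
#define HAVE_SUPPORTED_40000baseKR4_Full 1
/* Failed probes simply leave the macro undefined, so the driver guards
 * optional code paths with #ifdef HAVE_xxx. */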
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index c933e274..ec63bc6e 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -13,6 +13,7 @@
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>
+#include <linux/netlink.h>
#include <linux/rtnetlink.h>
/* Verbs header. */
@@ -36,6 +37,7 @@
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
+#include <rte_string_fns.h>
#include "mlx5.h"
#include "mlx5_utils.h"
@@ -90,6 +92,9 @@
/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"
+/* Select port representors to instantiate. */
+#define MLX5_REPRESENTOR "representor"
+
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
@@ -237,6 +242,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* In case mlx5_dev_stop() has not been called. */
mlx5_dev_interrupt_handler_uninstall(dev);
mlx5_traffic_disable(dev);
+ mlx5_flow_flush(dev, NULL);
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
@@ -256,7 +262,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
priv->txqs_n = 0;
priv->txqs = NULL;
}
- mlx5_flow_delete_drop_queue(dev);
mlx5_mprq_free_mp(dev);
mlx5_mr_release(dev);
if (priv->pd != NULL) {
@@ -273,8 +278,12 @@ mlx5_dev_close(struct rte_eth_dev *dev)
mlx5_socket_uninit(dev);
if (priv->config.vf)
mlx5_nl_mac_addr_flush(dev);
- if (priv->nl_socket >= 0)
- close(priv->nl_socket);
+ if (priv->nl_socket_route >= 0)
+ close(priv->nl_socket_route);
+ if (priv->nl_socket_rdma >= 0)
+ close(priv->nl_socket_rdma);
+ if (priv->mnl_socket)
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
ret = mlx5_hrxq_ibv_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
@@ -303,7 +312,27 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (ret)
DRV_LOG(WARNING, "port %u some flows still remain",
dev->data->port_id);
+ if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+ unsigned int c = 0;
+ unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
+ uint16_t port_id[i];
+
+ i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
+ while (i--) {
+ struct priv *opriv =
+ rte_eth_devices[port_id[i]].data->dev_private;
+
+ if (!opriv ||
+ opriv->domain_id != priv->domain_id ||
+ &rte_eth_devices[port_id[i]] == dev)
+ continue;
+ ++c;
+ }
+ if (!c)
+ claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+ }
memset(priv, 0, sizeof(*priv));
+ priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
}
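The block added above is a last-member-out release of the switch domain: it enumerates all ports backed by the same rte_device, counts siblings still holding priv->domain_id, and frees the domain only when none remain. Condensed sketch using the same helpers (logic mirrors the diff, not an independent implementation):

unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0); /* count only */
uint16_t ports[n];
unsigned int siblings = 0;

n = RTE_MIN(mlx5_dev_to_port_id(dev->device, ports, n), n);
while (n--) {
	struct priv *opriv = rte_eth_devices[ports[n]].data->dev_private;

	if (opriv && opriv->domain_id == priv->domain_id &&
	    &rte_eth_devices[ports[n]] != dev)
		siblings++;
}
if (siblings == 0)
	claim_zero(rte_eth_switch_domain_free(priv->domain_id));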
const struct eth_dev_ops mlx5_dev_ops = {
@@ -370,6 +399,10 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
.dev_set_link_down = mlx5_set_link_down,
.dev_set_link_up = mlx5_set_link_up,
.dev_close = mlx5_dev_close,
+ .promiscuous_enable = mlx5_promiscuous_enable,
+ .promiscuous_disable = mlx5_promiscuous_disable,
+ .allmulticast_enable = mlx5_allmulticast_enable,
+ .allmulticast_disable = mlx5_allmulticast_disable,
.link_update = mlx5_link_update,
.stats_get = mlx5_stats_get,
.stats_reset = mlx5_stats_reset,
@@ -400,39 +433,6 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
.is_removed = mlx5_is_removed,
};
-static struct {
- struct rte_pci_addr pci_addr; /* associated PCI address */
- uint32_t ports; /* physical ports bitfield. */
-} mlx5_dev[32];
-
-/**
- * Get device index in mlx5_dev[] from PCI bus address.
- *
- * @param[in] pci_addr
- * PCI bus address to look for.
- *
- * @return
- * mlx5_dev[] index on success, -1 on failure.
- */
-static int
-mlx5_dev_idx(struct rte_pci_addr *pci_addr)
-{
- unsigned int i;
- int ret = -1;
-
- assert(pci_addr != NULL);
- for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
- if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
- (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
- (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
- (mlx5_dev[i].pci_addr.function == pci_addr->function))
- return i;
- if ((mlx5_dev[i].ports == 0) && (ret == -1))
- ret = i;
- }
- return ret;
-}
-
/**
* Verify and store value for device argument.
*
@@ -452,6 +452,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
struct mlx5_dev_config *config = opaque;
unsigned long tmp;
+ /* No-op, port representors are processed in mlx5_dev_spawn(). */
+ if (!strcmp(MLX5_REPRESENTOR, key))
+ return 0;
errno = 0;
tmp = strtoul(val, NULL, 0);
if (errno) {
@@ -524,6 +527,7 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
MLX5_RX_VEC_EN,
MLX5_L3_VXLAN_EN,
MLX5_VF_NL_EN,
+ MLX5_REPRESENTOR,
NULL,
};
struct rte_kvargs *kvlist;
@@ -600,7 +604,7 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
rte_memseg_walk(find_lower_va_bound, &addr);
/* keep distance to hugepages to minimize potential conflicts. */
- addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
+ addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE));
/* anonymous mmap, no real memory consumption. */
addr = mmap(addr, MLX5_UAR_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@@ -668,133 +672,115 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev)
}
/**
- * DPDK callback to register a PCI device.
+ * Spawn an Ethernet device from Verbs information.
*
- * This function creates an Ethernet device for each port of a given
- * PCI device.
- *
- * @param[in] pci_drv
- * PCI driver structure (mlx5_driver).
- * @param[in] pci_dev
- * PCI device information.
+ * @param dpdk_dev
+ * Backing DPDK device.
+ * @param ibv_dev
+ * Verbs device.
+ * @param vf
+ * If nonzero, enable VF-specific features.
+ * @param[in] switch_info
+ * Switch properties of Ethernet device.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * A valid Ethernet device object on success, NULL otherwise and rte_errno
+ * is set. The following error is defined:
+ *
+ * EBUSY: device is not supposed to be spawned.
*/
-static int
-mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
+static struct rte_eth_dev *
+mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ struct ibv_device *ibv_dev,
+ int vf,
+ const struct mlx5_switch_info *switch_info)
{
- struct ibv_device **list = NULL;
- struct ibv_device *ibv_dev;
+ struct ibv_context *ctx;
+ struct ibv_device_attr_ex attr;
+ struct ibv_port_attr port_attr;
+ struct ibv_pd *pd = NULL;
+ struct mlx5dv_context dv_attr = { .comp_mask = 0 };
+ struct mlx5_dev_config config = {
+ .vf = !!vf,
+ .tx_vec_en = 1,
+ .rx_vec_en = 1,
+ .mpw_hdr_dseg = 0,
+ .txq_inline = MLX5_ARG_UNSET,
+ .txqs_inline = MLX5_ARG_UNSET,
+ .inline_max_packet_sz = MLX5_ARG_UNSET,
+ .vf_nl_en = 1,
+ .mprq = {
+ .enabled = 0,
+ .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
+ .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
+ .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
+ },
+ };
+ struct rte_eth_dev *eth_dev = NULL;
+ struct priv *priv = NULL;
int err = 0;
- struct ibv_context *attr_ctx = NULL;
- struct ibv_device_attr_ex device_attr;
- unsigned int vf = 0;
unsigned int mps;
unsigned int cqe_comp;
unsigned int tunnel_en = 0;
unsigned int mpls_en = 0;
unsigned int swp = 0;
- unsigned int verb_priorities = 0;
unsigned int mprq = 0;
unsigned int mprq_min_stride_size_n = 0;
unsigned int mprq_max_stride_size_n = 0;
unsigned int mprq_min_stride_num_n = 0;
unsigned int mprq_max_stride_num_n = 0;
- int idx;
- int i;
- struct mlx5dv_context attrs_out = {0};
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- struct ibv_counter_set_description cs_desc;
+ struct ibv_counter_set_description cs_desc = { .counter_type = 0 };
#endif
+ struct ether_addr mac;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ int own_domain_id = 0;
+ unsigned int i;
- /* Prepare shared data between primary and secondary process. */
- mlx5_prepare_shared_data();
- assert(pci_drv == &mlx5_driver);
- /* Get mlx5_dev[] index. */
- idx = mlx5_dev_idx(&pci_dev->addr);
- if (idx == -1) {
- DRV_LOG(ERR, "this driver cannot support any more adapters");
- err = ENOMEM;
- goto error;
- }
- DRV_LOG(DEBUG, "using driver device index %d", idx);
- /* Save PCI address. */
- mlx5_dev[idx].pci_addr = pci_dev->addr;
- list = mlx5_glue->get_device_list(&i);
- if (list == NULL) {
- assert(errno);
- err = errno;
- if (errno == ENOSYS)
- DRV_LOG(ERR,
- "cannot list devices, is ib_uverbs loaded?");
- goto error;
- }
- assert(i >= 0);
- /*
- * For each listed device, check related sysfs entry against
- * the provided PCI ID.
- */
- while (i != 0) {
- struct rte_pci_addr pci_addr;
+ /* Determine if this port representor is supposed to be spawned. */
+ if (switch_info->representor && dpdk_dev->devargs) {
+ struct rte_eth_devargs eth_da;
- --i;
- DRV_LOG(DEBUG, "checking device \"%s\"", list[i]->name);
- if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
- continue;
- if ((pci_dev->addr.domain != pci_addr.domain) ||
- (pci_dev->addr.bus != pci_addr.bus) ||
- (pci_dev->addr.devid != pci_addr.devid) ||
- (pci_dev->addr.function != pci_addr.function))
- continue;
- DRV_LOG(INFO, "PCI information matches, using device \"%s\"",
- list[i]->name);
- vf = ((pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
- (pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) ||
- (pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
- (pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
- attr_ctx = mlx5_glue->open_device(list[i]);
- rte_errno = errno;
- err = rte_errno;
- break;
- }
- if (attr_ctx == NULL) {
- switch (err) {
- case 0:
- DRV_LOG(ERR,
- "cannot access device, is mlx5_ib loaded?");
- err = ENODEV;
- break;
- case EINVAL:
- DRV_LOG(ERR,
- "cannot use device, are drivers up to date?");
- break;
+ err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
+ if (err) {
+ rte_errno = -err;
+ DRV_LOG(ERR, "failed to process device arguments: %s",
+ strerror(rte_errno));
+ return NULL;
}
- goto error;
+ for (i = 0; i < eth_da.nb_representor_ports; ++i)
+ if (eth_da.representor_ports[i] ==
+ (uint16_t)switch_info->port_name)
+ break;
+ if (i == eth_da.nb_representor_ports) {
+ rte_errno = EBUSY;
+ return NULL;
+ }
+ }
+ /* Prepare shared data between primary and secondary process. */
+ mlx5_prepare_shared_data();
+ errno = 0;
+ ctx = mlx5_glue->open_device(ibv_dev);
+ if (!ctx) {
+ rte_errno = errno ? errno : ENODEV;
+ return NULL;
}
- ibv_dev = list[i];
- DRV_LOG(DEBUG, "device opened");
#ifdef HAVE_IBV_MLX5_MOD_SWP
- attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
+ dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
#endif
/*
* Multi-packet send is supported by ConnectX-4 Lx PF as well
* as all ConnectX-5 devices.
*/
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
+ dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
+ dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
- mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
- if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
- if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
+ mlx5_glue->dv_query_device(ctx, &dv_attr);
+ if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
+ if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
DRV_LOG(DEBUG, "enhanced MPW is supported");
mps = MLX5_MPW_ENHANCED;
} else {
@@ -805,15 +791,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
DRV_LOG(DEBUG, "MPW isn't supported");
mps = MLX5_MPW_DISABLED;
}
+ config.mps = mps;
#ifdef HAVE_IBV_MLX5_MOD_SWP
- if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
- swp = attrs_out.sw_parsing_caps.sw_parsing_offloads;
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+ swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
DRV_LOG(DEBUG, "SWP support: %u", swp);
#endif
+ config.swp = !!swp;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
struct mlx5dv_striding_rq_caps mprq_caps =
- attrs_out.striding_rq_caps;
+ dv_attr.striding_rq_caps;
DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
mprq_caps.min_single_stride_log_num_of_bytes);
@@ -835,18 +823,21 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
mprq_caps.min_single_wqe_log_num_of_strides;
mprq_max_stride_num_n =
mprq_caps.max_single_wqe_log_num_of_strides;
+ config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
+ mprq_min_stride_num_n);
}
#endif
if (RTE_CACHE_LINE_SIZE == 128 &&
- !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
+ !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
cqe_comp = 0;
else
cqe_comp = 1;
+ config.cqe_comp = cqe_comp;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
- tunnel_en = ((attrs_out.tunnel_offloads_caps &
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+ tunnel_en = ((dv_attr.tunnel_offloads_caps &
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
- (attrs_out.tunnel_offloads_caps &
+ (dv_attr.tunnel_offloads_caps &
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
}
DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
@@ -855,10 +846,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
DRV_LOG(WARNING,
"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
+ config.tunnel_en = tunnel_en;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- mpls_en = ((attrs_out.tunnel_offloads_caps &
+ mpls_en = ((dv_attr.tunnel_offloads_caps &
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
- (attrs_out.tunnel_offloads_caps &
+ (dv_attr.tunnel_offloads_caps &
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
mpls_en ? "" : "not ");
@@ -866,387 +858,595 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
" old OFED/rdma-core version or firmware configuration");
#endif
- err = mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr);
+ config.mpls_en = mpls_en;
+ err = mlx5_glue->query_device_ex(ctx, NULL, &attr);
if (err) {
DEBUG("ibv_query_device_ex() failed");
goto error;
}
- DRV_LOG(INFO, "%u port(s) detected",
- device_attr.orig_attr.phys_port_cnt);
- for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
- char name[RTE_ETH_NAME_MAX_LEN];
- int len;
- uint32_t port = i + 1; /* ports are indexed from one */
- uint32_t test = (1 << i);
- struct ibv_context *ctx = NULL;
- struct ibv_port_attr port_attr;
- struct ibv_pd *pd = NULL;
- struct priv *priv = NULL;
- struct rte_eth_dev *eth_dev = NULL;
- struct ibv_device_attr_ex device_attr_ex;
- struct ether_addr mac;
- struct mlx5_dev_config config = {
- .cqe_comp = cqe_comp,
- .mps = mps,
- .tunnel_en = tunnel_en,
- .mpls_en = mpls_en,
- .tx_vec_en = 1,
- .rx_vec_en = 1,
- .mpw_hdr_dseg = 0,
- .txq_inline = MLX5_ARG_UNSET,
- .txqs_inline = MLX5_ARG_UNSET,
- .inline_max_packet_sz = MLX5_ARG_UNSET,
- .vf_nl_en = 1,
- .swp = !!swp,
- .mprq = {
- .enabled = 0, /* Disabled by default. */
- .stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
- mprq_min_stride_num_n),
- .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
- .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
- },
- };
-
- len = snprintf(name, sizeof(name), PCI_PRI_FMT,
- pci_dev->addr.domain, pci_dev->addr.bus,
- pci_dev->addr.devid, pci_dev->addr.function);
- if (device_attr.orig_attr.phys_port_cnt > 1)
- snprintf(name + len, sizeof(name), " port %u", i);
- mlx5_dev[idx].ports |= test;
- if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- eth_dev = rte_eth_dev_attach_secondary(name);
- if (eth_dev == NULL) {
- DRV_LOG(ERR, "can not attach rte ethdev");
- rte_errno = ENOMEM;
- err = rte_errno;
- goto error;
- }
- eth_dev->device = &pci_dev->device;
- eth_dev->dev_ops = &mlx5_dev_sec_ops;
- err = mlx5_uar_init_secondary(eth_dev);
- if (err) {
- err = rte_errno;
- goto error;
- }
- /* Receive command fd from primary process */
- err = mlx5_socket_connect(eth_dev);
- if (err < 0) {
- err = rte_errno;
- goto error;
- }
- /* Remap UAR for Tx queues. */
- err = mlx5_tx_uar_remap(eth_dev, err);
- if (err) {
- err = rte_errno;
- goto error;
- }
- /*
- * Ethdev pointer is still required as input since
- * the primary device is not accessible from the
- * secondary process.
- */
- eth_dev->rx_pkt_burst =
- mlx5_select_rx_function(eth_dev);
- eth_dev->tx_pkt_burst =
- mlx5_select_tx_function(eth_dev);
- rte_eth_dev_probing_finish(eth_dev);
- continue;
- }
- DRV_LOG(DEBUG, "using port %u (%08" PRIx32 ")", port, test);
- ctx = mlx5_glue->open_device(ibv_dev);
- if (ctx == NULL) {
- err = ENODEV;
- goto port_error;
+ if (!switch_info->representor)
+ rte_strlcpy(name, dpdk_dev->name, sizeof(name));
+ else
+ snprintf(name, sizeof(name), "%s_representor_%u",
+ dpdk_dev->name, switch_info->port_name);
+ DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (eth_dev == NULL) {
+ DRV_LOG(ERR, "can not attach rte ethdev");
+ rte_errno = ENOMEM;
+ err = rte_errno;
+ goto error;
}
- /* Check port status. */
- err = mlx5_glue->query_port(ctx, port, &port_attr);
+ eth_dev->device = dpdk_dev;
+ eth_dev->dev_ops = &mlx5_dev_sec_ops;
+ err = mlx5_uar_init_secondary(eth_dev);
if (err) {
- DRV_LOG(ERR, "port query failed: %s", strerror(err));
- goto port_error;
- }
- if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
- DRV_LOG(ERR,
- "port %d is not configured in Ethernet mode",
- port);
- err = EINVAL;
- goto port_error;
- }
- if (port_attr.state != IBV_PORT_ACTIVE)
- DRV_LOG(DEBUG, "port %d is not active: \"%s\" (%d)",
- port,
- mlx5_glue->port_state_str(port_attr.state),
- port_attr.state);
- /* Allocate protection domain. */
- pd = mlx5_glue->alloc_pd(ctx);
- if (pd == NULL) {
- DRV_LOG(ERR, "PD allocation failure");
- err = ENOMEM;
- goto port_error;
+ err = rte_errno;
+ goto error;
}
- mlx5_dev[idx].ports |= test;
- /* from rte_ethdev.c */
- priv = rte_zmalloc("ethdev private structure",
- sizeof(*priv),
- RTE_CACHE_LINE_SIZE);
- if (priv == NULL) {
- DRV_LOG(ERR, "priv allocation failure");
- err = ENOMEM;
- goto port_error;
+ /* Receive command fd from primary process */
+ err = mlx5_socket_connect(eth_dev);
+ if (err < 0) {
+ err = rte_errno;
+ goto error;
}
- priv->ctx = ctx;
- strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
- sizeof(priv->ibdev_path));
- priv->device_attr = device_attr;
- priv->port = port;
- priv->pd = pd;
- priv->mtu = ETHER_MTU;
- err = mlx5_args(&config, pci_dev->device.devargs);
+ /* Remap UAR for Tx queues. */
+ err = mlx5_tx_uar_remap(eth_dev, err);
if (err) {
- DRV_LOG(ERR, "failed to process device arguments: %s",
- strerror(err));
err = rte_errno;
- goto port_error;
+ goto error;
+ }
+ /*
+ * Ethdev pointer is still required as input since
+ * the primary device is not accessible from the
+ * secondary process.
+ */
+ eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
+ eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
+ claim_zero(mlx5_glue->close_device(ctx));
+ return eth_dev;
+ }
+ /* Check port status. */
+ err = mlx5_glue->query_port(ctx, 1, &port_attr);
+ if (err) {
+ DRV_LOG(ERR, "port query failed: %s", strerror(err));
+ goto error;
+ }
+ if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
+ DRV_LOG(ERR, "port is not configured in Ethernet mode");
+ err = EINVAL;
+ goto error;
+ }
+ if (port_attr.state != IBV_PORT_ACTIVE)
+ DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
+ mlx5_glue->port_state_str(port_attr.state),
+ port_attr.state);
+ /* Allocate protection domain. */
+ pd = mlx5_glue->alloc_pd(ctx);
+ if (pd == NULL) {
+ DRV_LOG(ERR, "PD allocation failure");
+ err = ENOMEM;
+ goto error;
+ }
+ priv = rte_zmalloc("ethdev private structure",
+ sizeof(*priv),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ DRV_LOG(ERR, "priv allocation failure");
+ err = ENOMEM;
+ goto error;
+ }
+ priv->ctx = ctx;
+ strncpy(priv->ibdev_name, priv->ctx->device->name,
+ sizeof(priv->ibdev_name));
+ strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
+ sizeof(priv->ibdev_path));
+ priv->device_attr = attr;
+ priv->pd = pd;
+ priv->mtu = ETHER_MTU;
+#ifndef RTE_ARCH_64
+	/* Initialize UAR access locks for 32-bit implementations. */
+ rte_spinlock_init(&priv->uar_lock_cq);
+ for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+ rte_spinlock_init(&priv->uar_lock[i]);
+#endif
+ /* Some internal functions rely on Netlink sockets, open them now. */
+ priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
+ priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
+ priv->nl_sn = 0;
+ priv->representor = !!switch_info->representor;
+ priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+ priv->representor_id =
+ switch_info->representor ? switch_info->port_name : -1;
+ /*
+	 * Look for sibling devices in order to reuse their switch domain
+	 * if any; otherwise allocate one.
+ */
+ i = mlx5_dev_to_port_id(dpdk_dev, NULL, 0);
+ if (i > 0) {
+ uint16_t port_id[i];
+
+ i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i);
+ while (i--) {
+ const struct priv *opriv =
+ rte_eth_devices[port_id[i]].data->dev_private;
+
+ if (!opriv ||
+ opriv->domain_id ==
+ RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
+ continue;
+ priv->domain_id = opriv->domain_id;
+ break;
}
- err = mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex);
+ }
+ if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+ err = rte_eth_switch_domain_alloc(&priv->domain_id);
if (err) {
- DRV_LOG(ERR, "ibv_query_device_ex() failed");
- goto port_error;
+ err = rte_errno;
+ DRV_LOG(ERR, "unable to allocate switch domain: %s",
+ strerror(rte_errno));
+ goto error;
}
- config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
- IBV_DEVICE_RAW_IP_CSUM);
- DRV_LOG(DEBUG, "checksum offloading is %ssupported",
- (config.hw_csum ? "" : "not "));
+ own_domain_id = 1;
+ }
+ err = mlx5_args(&config, dpdk_dev->devargs);
+ if (err) {
+ err = rte_errno;
+ DRV_LOG(ERR, "failed to process device arguments: %s",
+ strerror(rte_errno));
+ goto error;
+ }
+ config.hw_csum = !!(attr.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM);
+ DRV_LOG(DEBUG, "checksum offloading is %ssupported",
+ (config.hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- config.flow_counter_en = !!(device_attr.max_counter_sets);
- mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
- DRV_LOG(DEBUG,
- "counter type = %d, num of cs = %ld, attributes = %d",
- cs_desc.counter_type, cs_desc.num_of_cs,
- cs_desc.attributes);
+ config.flow_counter_en = !!attr.max_counter_sets;
+ mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
+ DRV_LOG(DEBUG, "counter type = %d, num of cs = %ld, attributes = %d",
+ cs_desc.counter_type, cs_desc.num_of_cs,
+ cs_desc.attributes);
#endif
- config.ind_table_max_size =
- device_attr_ex.rss_caps.max_rwq_indirection_table_size;
- /* Remove this check once DPDK supports larger/variable
- * indirection tables. */
- if (config.ind_table_max_size >
- (unsigned int)ETH_RSS_RETA_SIZE_512)
- config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
- DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
- config.ind_table_max_size);
- config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
- IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
- DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
- (config.hw_vlan_strip ? "" : "not "));
-
- config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
- IBV_RAW_PACKET_CAP_SCATTER_FCS);
- DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
- (config.hw_fcs_strip ? "" : "not "));
-
+ config.ind_table_max_size =
+ attr.rss_caps.max_rwq_indirection_table_size;
+ /*
+ * Remove this check once DPDK supports larger/variable
+ * indirection tables.
+ */
+ if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
+ config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+ DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
+ config.ind_table_max_size);
+ config.hw_vlan_strip = !!(attr.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
+ DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
+ (config.hw_vlan_strip ? "" : "not "));
+ config.hw_fcs_strip = !!(attr.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_SCATTER_FCS);
+ DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
+ (config.hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
- config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
+ config.hw_padding = !!attr.rx_pad_end_addr_align;
#endif
- DRV_LOG(DEBUG,
- "hardware Rx end alignment padding is %ssupported",
- (config.hw_padding ? "" : "not "));
- config.vf = vf;
- config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
- (device_attr_ex.tso_caps.supported_qpts &
- (1 << IBV_QPT_RAW_PACKET)));
- if (config.tso)
- config.tso_max_payload_sz =
- device_attr_ex.tso_caps.max_tso;
- if (config.mps && !mps) {
- DRV_LOG(ERR,
- "multi-packet send not supported on this device"
- " (" MLX5_TXQ_MPW_EN ")");
- err = ENOTSUP;
- goto port_error;
- }
- DRV_LOG(INFO, "%s MPS is %s",
- config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
- config.mps != MLX5_MPW_DISABLED ? "enabled" :
- "disabled");
- if (config.cqe_comp && !cqe_comp) {
- DRV_LOG(WARNING, "Rx CQE compression isn't supported");
- config.cqe_comp = 0;
- }
- config.mprq.enabled = config.mprq.enabled && mprq;
- if (config.mprq.enabled) {
- if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
- config.mprq.stride_num_n < mprq_min_stride_num_n) {
- config.mprq.stride_num_n =
- RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
- mprq_min_stride_num_n);
- DRV_LOG(WARNING,
- "the number of strides"
- " for Multi-Packet RQ is out of range,"
- " setting default value (%u)",
- 1 << config.mprq.stride_num_n);
- }
- config.mprq.min_stride_size_n = mprq_min_stride_size_n;
- config.mprq.max_stride_size_n = mprq_max_stride_size_n;
- }
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL) {
- DRV_LOG(ERR, "can not allocate rte ethdev");
- err = ENOMEM;
- goto port_error;
- }
- eth_dev->data->dev_private = priv;
- priv->dev_data = eth_dev->data;
- eth_dev->data->mac_addrs = priv->mac;
- eth_dev->device = &pci_dev->device;
- rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->device->driver = &mlx5_driver.driver;
- err = mlx5_uar_init_primary(eth_dev);
- if (err) {
- err = rte_errno;
- goto port_error;
- }
- /* Configure the first MAC address by default. */
- if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
- DRV_LOG(ERR,
- "port %u cannot get MAC address, is mlx5_en"
- " loaded? (errno: %s)",
- eth_dev->data->port_id, strerror(errno));
- err = ENODEV;
- goto port_error;
+ DRV_LOG(DEBUG, "hardware Rx end alignment padding is %ssupported",
+ (config.hw_padding ? "" : "not "));
+ config.tso = (attr.tso_caps.max_tso > 0 &&
+ (attr.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
+ if (config.tso)
+ config.tso_max_payload_sz = attr.tso_caps.max_tso;
+ if (config.mps && !mps) {
+ DRV_LOG(ERR,
+ "multi-packet send not supported on this device"
+ " (" MLX5_TXQ_MPW_EN ")");
+ err = ENOTSUP;
+ goto error;
+ }
+ DRV_LOG(INFO, "%sMPS is %s",
+ config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
+ config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ if (config.cqe_comp && !cqe_comp) {
+ DRV_LOG(WARNING, "Rx CQE compression isn't supported");
+ config.cqe_comp = 0;
+ }
+ if (config.mprq.enabled && mprq) {
+ if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
+ config.mprq.stride_num_n < mprq_min_stride_num_n) {
+ config.mprq.stride_num_n =
+ RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
+ mprq_min_stride_num_n);
+ DRV_LOG(WARNING,
+ "the number of strides"
+ " for Multi-Packet RQ is out of range,"
+ " setting default value (%u)",
+ 1 << config.mprq.stride_num_n);
}
- DRV_LOG(INFO,
- "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
- eth_dev->data->port_id,
- mac.addr_bytes[0], mac.addr_bytes[1],
- mac.addr_bytes[2], mac.addr_bytes[3],
- mac.addr_bytes[4], mac.addr_bytes[5]);
+ config.mprq.min_stride_size_n = mprq_min_stride_size_n;
+ config.mprq.max_stride_size_n = mprq_max_stride_size_n;
+ } else if (config.mprq.enabled && !mprq) {
+ DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
+ config.mprq.enabled = 0;
+ }
+ eth_dev = rte_eth_dev_allocate(name);
+ if (eth_dev == NULL) {
+ DRV_LOG(ERR, "can not allocate rte ethdev");
+ err = ENOMEM;
+ goto error;
+ }
+ if (priv->representor)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ eth_dev->data->dev_private = priv;
+ priv->dev_data = eth_dev->data;
+ eth_dev->data->mac_addrs = priv->mac;
+ eth_dev->device = dpdk_dev;
+ eth_dev->device->driver = &mlx5_driver.driver;
+ err = mlx5_uar_init_primary(eth_dev);
+ if (err) {
+ err = rte_errno;
+ goto error;
+ }
+ /* Configure the first MAC address by default. */
+ if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
+ DRV_LOG(ERR,
+ "port %u cannot get MAC address, is mlx5_en"
+ " loaded? (errno: %s)",
+ eth_dev->data->port_id, strerror(rte_errno));
+ err = ENODEV;
+ goto error;
+ }
+ DRV_LOG(INFO,
+ "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
+ eth_dev->data->port_id,
+ mac.addr_bytes[0], mac.addr_bytes[1],
+ mac.addr_bytes[2], mac.addr_bytes[3],
+ mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
- {
- char ifname[IF_NAMESIZE];
-
- if (mlx5_get_ifname(eth_dev, &ifname) == 0)
- DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
- eth_dev->data->port_id, ifname);
- else
- DRV_LOG(DEBUG, "port %u ifname is unknown",
- eth_dev->data->port_id);
- }
+ {
+ char ifname[IF_NAMESIZE];
+
+ if (mlx5_get_ifname(eth_dev, &ifname) == 0)
+ DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
+ eth_dev->data->port_id, ifname);
+ else
+ DRV_LOG(DEBUG, "port %u ifname is unknown",
+ eth_dev->data->port_id);
+ }
#endif
- /* Get actual MTU if possible. */
- err = mlx5_get_mtu(eth_dev, &priv->mtu);
- if (err) {
- err = rte_errno;
- goto port_error;
- }
- DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
- priv->mtu);
- /*
- * Initialize burst functions to prevent crashes before link-up.
- */
- eth_dev->rx_pkt_burst = removed_rx_burst;
- eth_dev->tx_pkt_burst = removed_tx_burst;
- eth_dev->dev_ops = &mlx5_dev_ops;
- /* Register MAC address. */
- claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
- priv->nl_socket = -1;
- priv->nl_sn = 0;
- if (vf && config.vf_nl_en) {
- priv->nl_socket = mlx5_nl_init(RTMGRP_LINK);
- if (priv->nl_socket < 0)
- priv->nl_socket = -1;
- mlx5_nl_mac_addr_sync(eth_dev);
- }
- TAILQ_INIT(&priv->flows);
- TAILQ_INIT(&priv->ctrl_flows);
- /* Hint libmlx5 to use PMD allocator for data plane resources */
- struct mlx5dv_ctx_allocators alctr = {
- .alloc = &mlx5_alloc_verbs_buf,
- .free = &mlx5_free_verbs_buf,
- .data = priv,
- };
- mlx5_glue->dv_set_context_attr(ctx,
- MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
- (void *)((uintptr_t)&alctr));
- /* Bring Ethernet device up. */
- DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
- eth_dev->data->port_id);
- mlx5_set_link_up(eth_dev);
- /*
- * Even though the interrupt handler is not installed yet,
- * interrupts will still trigger on the asyn_fd from
- * Verbs context returned by ibv_open_device().
- */
- mlx5_link_update(eth_dev, 0);
- /* Store device configuration on private structure. */
- priv->config = config;
- /* Create drop queue. */
- err = mlx5_flow_create_drop_queue(eth_dev);
- if (err) {
- DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
- eth_dev->data->port_id, strerror(rte_errno));
- err = rte_errno;
- goto port_error;
- }
- /* Supported Verbs flow priority number detection. */
- if (verb_priorities == 0)
- verb_priorities = mlx5_get_max_verbs_prio(eth_dev);
- if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) {
- DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u",
- eth_dev->data->port_id, verb_priorities);
- goto port_error;
+ /* Get actual MTU if possible. */
+ err = mlx5_get_mtu(eth_dev, &priv->mtu);
+ if (err) {
+ err = rte_errno;
+ goto error;
+ }
+ DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
+ priv->mtu);
+ /* Initialize burst functions to prevent crashes before link-up. */
+ eth_dev->rx_pkt_burst = removed_rx_burst;
+ eth_dev->tx_pkt_burst = removed_tx_burst;
+ eth_dev->dev_ops = &mlx5_dev_ops;
+ /* Register MAC address. */
+ claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
+ if (vf && config.vf_nl_en)
+ mlx5_nl_mac_addr_sync(eth_dev);
+ priv->mnl_socket = mlx5_nl_flow_socket_create();
+ if (!priv->mnl_socket) {
+ err = -rte_errno;
+ DRV_LOG(WARNING,
+ "flow rules relying on switch offloads will not be"
+ " supported: cannot open libmnl socket: %s",
+ strerror(rte_errno));
+ } else {
+ struct rte_flow_error error;
+ unsigned int ifindex = mlx5_ifindex(eth_dev);
+
+ if (!ifindex) {
+ err = -rte_errno;
+ error.message =
+ "cannot retrieve network interface index";
+ } else {
+ err = mlx5_nl_flow_init(priv->mnl_socket, ifindex,
+ &error);
}
- priv->config.max_verbs_prio = verb_priorities;
- /*
- * Once the device is added to the list of memory event
- * callback, its global MR cache table cannot be expanded
- * on the fly because of deadlock. If it overflows, lookup
- * should be done by searching MR list linearly, which is slow.
- */
- err = mlx5_mr_btree_init(&priv->mr.cache,
- MLX5_MR_BTREE_CACHE_N * 2,
- eth_dev->device->numa_node);
if (err) {
- err = rte_errno;
- goto port_error;
+ DRV_LOG(WARNING,
+ "flow rules relying on switch offloads will"
+ " not be supported: %s: %s",
+ error.message, strerror(rte_errno));
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+ priv->mnl_socket = NULL;
}
- /* Add device to memory callback list. */
- rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
- LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
- priv, mem_event_cb);
- rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
- rte_eth_dev_probing_finish(eth_dev);
- continue;
-port_error:
- if (priv)
- rte_free(priv);
- if (pd)
- claim_zero(mlx5_glue->dealloc_pd(pd));
- if (ctx)
- claim_zero(mlx5_glue->close_device(ctx));
- if (eth_dev && rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_eth_dev_release_port(eth_dev);
- break;
}
+ TAILQ_INIT(&priv->flows);
+ TAILQ_INIT(&priv->ctrl_flows);
+ /* Hint libmlx5 to use PMD allocator for data plane resources */
+ struct mlx5dv_ctx_allocators alctr = {
+ .alloc = &mlx5_alloc_verbs_buf,
+ .free = &mlx5_free_verbs_buf,
+ .data = priv,
+ };
+ mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
+ (void *)((uintptr_t)&alctr));
+ /* Bring Ethernet device up. */
+ DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
+ eth_dev->data->port_id);
+ mlx5_set_link_up(eth_dev);
/*
- * XXX if something went wrong in the loop above, there is a resource
- * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
- * long as the dpdk does not provide a way to deallocate a ethdev and a
- * way to enumerate the registered ethdevs to free the previous ones.
+ * Even though the interrupt handler is not installed yet,
+	 * interrupts will still trigger on the async_fd from
+ * Verbs context returned by ibv_open_device().
*/
- /* no port found, complain */
- if (!mlx5_dev[idx].ports) {
- rte_errno = ENODEV;
+ mlx5_link_update(eth_dev, 0);
+ /* Store device configuration on private structure. */
+ priv->config = config;
+ /* Supported Verbs flow priority number detection. */
+ err = mlx5_flow_discover_priorities(eth_dev);
+ if (err < 0)
+ goto error;
+ priv->config.flow_prio = err;
+ /*
+	 * Once the device is added to the memory event callback list,
+	 * its global MR cache table cannot be expanded on the fly due
+	 * to a deadlock risk. If it overflows, lookup should be done
+	 * by searching the MR list linearly, which is slow.
+ */
+ err = mlx5_mr_btree_init(&priv->mr.cache,
+ MLX5_MR_BTREE_CACHE_N * 2,
+ eth_dev->device->numa_node);
+ if (err) {
err = rte_errno;
+ goto error;
}
+ /* Add device to memory callback list. */
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
+ priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ return eth_dev;
error:
- if (attr_ctx)
- claim_zero(mlx5_glue->close_device(attr_ctx));
- if (list)
- mlx5_glue->free_device_list(list);
- if (err) {
- rte_errno = err;
+ if (priv) {
+ if (priv->nl_socket_route >= 0)
+ close(priv->nl_socket_route);
+ if (priv->nl_socket_rdma >= 0)
+ close(priv->nl_socket_rdma);
+ if (priv->mnl_socket)
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+ if (own_domain_id)
+ claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+ rte_free(priv);
+ }
+ if (pd)
+ claim_zero(mlx5_glue->dealloc_pd(pd));
+ if (eth_dev)
+ rte_eth_dev_release_port(eth_dev);
+ if (ctx)
+ claim_zero(mlx5_glue->close_device(ctx));
+ assert(err > 0);
+ rte_errno = err;
+ return NULL;
+}
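+/*
+ * A worked example of the representor filter at the top of
+ * mlx5_dev_spawn(), assuming a device probed with devargs
+ * "representor=[0,2]":
+ *
+ *	switch_info->representor == 0	-> always spawned (master)
+ *	representor, port_name == 0	-> spawned
+ *	representor, port_name == 1	-> NULL, rte_errno == EBUSY
+ *	representor, port_name == 2	-> spawned
+ */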
+
+/** Data associated with devices to spawn. */
+struct mlx5_dev_spawn_data {
+ unsigned int ifindex; /**< Network interface index. */
+ struct mlx5_switch_info info; /**< Switch information. */
+ struct ibv_device *ibv_dev; /**< Associated IB device. */
+ struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
+};
+
+/**
+ * Comparison callback to sort device data.
+ *
+ * This is meant to be used with qsort().
+ *
+ * @param[in] a
+ *   Pointer to the first data object.
+ * @param[in] b
+ *   Pointer to the second data object.
+ *
+ * @return
+ * 0 if both objects are equal, less than 0 if the first argument is less
+ * than the second, greater than 0 otherwise.
+ */
+static int
+mlx5_dev_spawn_data_cmp(const void *a, const void *b)
+{
+ const struct mlx5_switch_info *si_a =
+ &((const struct mlx5_dev_spawn_data *)a)->info;
+ const struct mlx5_switch_info *si_b =
+ &((const struct mlx5_dev_spawn_data *)b)->info;
+ int ret;
+
+ /* Master device first. */
+ ret = si_b->master - si_a->master;
+ if (ret)
+ return ret;
+ /* Then representor devices. */
+ ret = si_b->representor - si_a->representor;
+ if (ret)
+ return ret;
+ /* Unidentified devices come last in no specific order. */
+ if (!si_a->representor)
+ return 0;
+ /* Order representors by name. */
+ return si_a->port_name - si_b->port_name;
+}
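+/*
+ * Illustration of the resulting order, e.g. given one master, two
+ * representors (port names 2 and 0) and one unidentified device:
+ *
+ *	qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp);
+ *	-> master, representor 0, representor 2, unidentified
+ */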
+
+/**
+ * DPDK callback to register a PCI device.
+ *
+ * This function spawns Ethernet devices out of a given PCI device.
+ *
+ * @param[in] pci_drv
+ * PCI driver structure (mlx5_driver).
+ * @param[in] pci_dev
+ * PCI device information.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct ibv_device **ibv_list;
+ unsigned int n = 0;
+ int vf;
+ int ret;
+
+ assert(pci_drv == &mlx5_driver);
+ errno = 0;
+ ibv_list = mlx5_glue->get_device_list(&ret);
+ if (!ibv_list) {
+ rte_errno = errno ? errno : ENOSYS;
+ DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
return -rte_errno;
}
- return 0;
+
+ struct ibv_device *ibv_match[ret + 1];
+
+ while (ret-- > 0) {
+ struct rte_pci_addr pci_addr;
+
+ DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
+ if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr))
+ continue;
+ if (pci_dev->addr.domain != pci_addr.domain ||
+ pci_dev->addr.bus != pci_addr.bus ||
+ pci_dev->addr.devid != pci_addr.devid ||
+ pci_dev->addr.function != pci_addr.function)
+ continue;
+ DRV_LOG(INFO, "PCI information matches for device \"%s\"",
+ ibv_list[ret]->name);
+ ibv_match[n++] = ibv_list[ret];
+ }
+ ibv_match[n] = NULL;
+
+ struct mlx5_dev_spawn_data list[n];
+ int nl_route = n ? mlx5_nl_init(NETLINK_ROUTE) : -1;
+ int nl_rdma = n ? mlx5_nl_init(NETLINK_RDMA) : -1;
+ unsigned int i;
+ unsigned int u;
+
+ /*
+ * The existence of several matching entries (n > 1) means port
+	 * representors have been instantiated. No existing Verbs call or
+	 * /sys entry can tell them apart; this can only be done through
+	 * Netlink calls, assuming kernel drivers are recent enough to
+ * support them.
+ *
+ * In the event of identification failure through Netlink, try again
+ * through sysfs, then either:
+ *
+ * 1. No device matches (n == 0), complain and bail out.
+ * 2. A single IB device matches (n == 1) and is not a representor,
+ * assume no switch support.
+ * 3. Otherwise no safe assumptions can be made; complain louder and
+ * bail out.
+ */
+ for (i = 0; i != n; ++i) {
+ list[i].ibv_dev = ibv_match[i];
+ list[i].eth_dev = NULL;
+ if (nl_rdma < 0)
+ list[i].ifindex = 0;
+ else
+ list[i].ifindex = mlx5_nl_ifindex
+ (nl_rdma, list[i].ibv_dev->name);
+ if (nl_route < 0 ||
+ !list[i].ifindex ||
+ mlx5_nl_switch_info(nl_route, list[i].ifindex,
+ &list[i].info) ||
+ ((!list[i].info.representor && !list[i].info.master) &&
+ mlx5_sysfs_switch_info(list[i].ifindex, &list[i].info))) {
+ list[i].ifindex = 0;
+ memset(&list[i].info, 0, sizeof(list[i].info));
+ continue;
+ }
+ }
+ if (nl_rdma >= 0)
+ close(nl_rdma);
+ if (nl_route >= 0)
+ close(nl_route);
+ /* Count unidentified devices. */
+ for (u = 0, i = 0; i != n; ++i)
+ if (!list[i].info.master && !list[i].info.representor)
+ ++u;
+ if (u) {
+ if (n == 1 && u == 1) {
+ /* Case #2. */
+ DRV_LOG(INFO, "no switch support detected");
+ } else {
+ /* Case #3. */
+ DRV_LOG(ERR,
+ "unable to tell which of the matching devices"
+ " is the master (lack of kernel support?)");
+ n = 0;
+ }
+ }
+ /*
+	 * Sort list to probe devices in natural order for users' convenience
+ * (i.e. master first, then representors from lowest to highest ID).
+ */
+ if (n)
+ qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp);
+ switch (pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
+ vf = 1;
+ break;
+ default:
+ vf = 0;
+ }
+ for (i = 0; i != n; ++i) {
+ uint32_t restore;
+
+ list[i].eth_dev = mlx5_dev_spawn
+ (&pci_dev->device, list[i].ibv_dev, vf, &list[i].info);
+ if (!list[i].eth_dev) {
+ if (rte_errno != EBUSY)
+ break;
+ /* Device is disabled, ignore it. */
+ continue;
+ }
+ restore = list[i].eth_dev->data->dev_flags;
+ rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
+ /* Restore non-PCI flags cleared by the above call. */
+ list[i].eth_dev->data->dev_flags |= restore;
+ rte_eth_dev_probing_finish(list[i].eth_dev);
+ }
+ mlx5_glue->free_device_list(ibv_list);
+ if (!n) {
+ DRV_LOG(WARNING,
+ "no Verbs device matches PCI device " PCI_PRI_FMT ","
+ " are kernel drivers loaded?",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+ rte_errno = ENOENT;
+ ret = -rte_errno;
+ } else if (i != n) {
+ DRV_LOG(ERR,
+ "probe of PCI device " PCI_PRI_FMT " aborted after"
+ " encountering an error: %s",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function,
+ strerror(rte_errno));
+ ret = -rte_errno;
+ /* Roll back. */
+ while (i--) {
+ if (!list[i].eth_dev)
+ continue;
+ mlx5_dev_close(list[i].eth_dev);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(list[i].eth_dev->data->dev_private);
+ claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
+ }
+ /* Restore original error. */
+ rte_errno = -ret;
+ } else {
+ ret = 0;
+ }
+ return ret;
}
static const struct rte_pci_id mlx5_pci_id_map[] = {
@@ -1435,10 +1635,13 @@ glue_error:
/**
* Driver initialization routine.
*/
-RTE_INIT(rte_mlx5_pmd_init);
-static void
-rte_mlx5_pmd_init(void)
+RTE_INIT(rte_mlx5_pmd_init)
{
+ /* Initialize driver log type. */
+ mlx5_logtype = rte_log_register("pmd.net.mlx5");
+ if (mlx5_logtype >= 0)
+ rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
+
/* Build the static tables for Verbs conversion. */
mlx5_set_ptype_table();
mlx5_set_cksum_table();
@@ -1453,6 +1656,11 @@ rte_mlx5_pmd_init(void)
/* Match the size of Rx completion entry to the size of a cacheline. */
if (RTE_CACHE_LINE_SIZE == 128)
setenv("MLX5_CQE_SIZE", "128", 0);
+ /*
+ * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
+	 * clean up all the Verbs resources even when the device was removed.
+ */
+ setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
if (mlx5_glue_init())
return;
@@ -1480,11 +1688,3 @@ rte_mlx5_pmd_init(void)
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
-
-/** Initialize driver log type. */
-RTE_INIT(vdev_netvsc_init_log)
-{
- mlx5_logtype = rte_log_register("pmd.net.mlx5");
- if (mlx5_logtype >= 0)
- rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
-}
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 997b04a3..a3a34cff 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -53,6 +53,14 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2,
};
+/** Switch information returned by mlx5_nl_switch_info(). */
+struct mlx5_switch_info {
+ uint32_t master:1; /**< Master device. */
+ uint32_t representor:1; /**< Representor device. */
+ int32_t port_name; /**< Representor port name. */
+ uint64_t switch_id; /**< Switch identifier. */
+};
+
LIST_HEAD(mlx5_dev_list, priv);
/* Shared memory between primary and secondary processes. */
@@ -114,7 +122,7 @@ struct mlx5_dev_config {
unsigned int min_rxqs_num;
/* Rx queue count threshold to enable MPRQ. */
} mprq; /* Configurations for Multi-Packet RQ. */
- unsigned int max_verbs_prio; /* Number of Verb flow priorities. */
+ unsigned int flow_prio; /* Number of flow priorities. */
unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int ind_table_max_size; /* Maximum indirection table size. */
int txq_inline; /* Maximum packet size for inlining. */
@@ -131,9 +139,6 @@ enum mlx5_verbs_alloc_type {
MLX5_VERBS_ALLOC_TYPE_RX_QUEUE,
};
-/* 8 Verbs priorities. */
-#define MLX5_VERBS_FLOW_PRIO_8 8
-
/**
* Verbs allocator needs a context to know in the callback which kind of
* resources it is allocating.
@@ -145,12 +150,27 @@ struct mlx5_verbs_alloc_ctx {
LIST_HEAD(mlx5_mr_list, mlx5_mr);
+/* Flow drop context necessary due to Verbs API. */
+struct mlx5_drop {
+	struct mlx5_hrxq *hrxq; /* Hash Rx queue. */
+ struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
+};
+
+/** DPDK port to network interface index (ifindex) conversion. */
+struct mlx5_nl_flow_ptoi {
+ uint16_t port_id; /**< DPDK port ID. */
+ unsigned int ifindex; /**< Network interface index. */
+};
+
+struct mnl_socket;
+
struct priv {
LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct ibv_context *ctx; /* Verbs context. */
struct ibv_device_attr_ex device_attr; /* Device properties. */
struct ibv_pd *pd; /* Protection Domain. */
+ char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
@@ -159,8 +179,10 @@ struct priv {
unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
/* Device properties. */
uint16_t mtu; /* Configured MTU. */
- uint8_t port; /* Physical port number. */
unsigned int isolated:1; /* Whether isolated mode is enabled. */
+ unsigned int representor:1; /* Device is a port representor. */
+ uint16_t domain_id; /* Switch domain identifier. */
+ int32_t representor_id; /* Port representor identifier. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
@@ -171,9 +193,11 @@ struct priv {
struct rte_intr_handle intr_handle; /* Interrupt handler. */
unsigned int (*reta_idx)[]; /* RETA index table. */
unsigned int reta_idx_n; /* RETA index size. */
- struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
+ struct mlx5_drop drop_queue; /* Flow drop queues. */
struct mlx5_flows flows; /* RTE Flow rules. */
struct mlx5_flows ctrl_flows; /* Control flow rules. */
+ LIST_HEAD(counters, mlx5_flow_counter) flow_counters;
+ /* Flow counters. */
struct {
uint32_t dev_gen; /* Generation number to flush local caches. */
rte_rwlock_t rwlock; /* MR Lock. */
@@ -196,8 +220,15 @@ struct priv {
struct mlx5_dev_config config; /* Device configuration. */
struct mlx5_verbs_alloc_ctx verbs_alloc_ctx;
/* Context for Verbs allocator. */
- int nl_socket; /* Netlink socket. */
+ int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
+ int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
uint32_t nl_sn; /* Netlink message sequence number. */
+#ifndef RTE_ARCH_64
+	rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR. */
+	rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
+	/* UAR same-page access control required in 32-bit implementations. */
+#endif
+ struct mnl_socket *mnl_socket; /* Libmnl socket. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
@@ -209,9 +240,12 @@ int mlx5_getenv_int(const char *);
/* mlx5_ethdev.c */
+int mlx5_get_master_ifname(const struct rte_eth_dev *dev,
+ char (*ifname)[IF_NAMESIZE]);
int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]);
-int mlx5_ifindex(const struct rte_eth_dev *dev);
-int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr);
+unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
+int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr,
+ int master);
int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
unsigned int flags);
@@ -236,6 +270,11 @@ int mlx5_set_link_up(struct rte_eth_dev *dev);
int mlx5_is_removed(struct rte_eth_dev *dev);
eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
+unsigned int mlx5_dev_to_port_id(const struct rte_device *dev,
+ uint16_t *port_list,
+ unsigned int port_list_n);
+int mlx5_sysfs_switch_info(unsigned int ifindex,
+ struct mlx5_switch_info *info);
/* mlx5_mac.c */
@@ -296,7 +335,8 @@ int mlx5_traffic_restart(struct rte_eth_dev *dev);
/* mlx5_flow.c */
-unsigned int mlx5_get_max_verbs_prio(struct rte_eth_dev *dev);
+int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
+void mlx5_flow_print(struct rte_flow *flow);
int mlx5_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
@@ -343,7 +383,7 @@ int mlx5_socket_connect(struct rte_eth_dev *priv);
/* mlx5_nl.c */
-int mlx5_nl_init(uint32_t nlgroups);
+int mlx5_nl_init(int protocol);
int mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
uint32_t index);
int mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac,
@@ -352,5 +392,27 @@ void mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev);
void mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev);
int mlx5_nl_promisc(struct rte_eth_dev *dev, int enable);
int mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable);
+unsigned int mlx5_nl_ifindex(int nl, const char *name);
+int mlx5_nl_switch_info(int nl, unsigned int ifindex,
+ struct mlx5_switch_info *info);
+
+/* mlx5_nl_flow.c */
+
+int mlx5_nl_flow_transpose(void *buf,
+ size_t size,
+ const struct mlx5_nl_flow_ptoi *ptoi,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+void mlx5_nl_flow_brand(void *buf, uint32_t handle);
+int mlx5_nl_flow_create(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error);
+int mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error);
+int mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex,
+ struct rte_flow_error *error);
+struct mnl_socket *mlx5_nl_flow_socket_create(void);
+void mlx5_nl_flow_socket_destroy(struct mnl_socket *nl);
#endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 51124cdc..f2a16795 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -64,10 +64,11 @@
#define MLX5_VPMD_MIN_TXQS 4
/* Threshold of buffer replenishment for vectorized Rx. */
-#define MLX5_VPMD_RXQ_RPLNSH_THRESH 64U
+#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
+ (RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2))
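+/*
+ * E.g. with MLX5_VPMD_RX_MAX_BURST == 64U (defined below),
+ * MLX5_VPMD_RXQ_RPLNSH_THRESH(512) == 64 and
+ * MLX5_VPMD_RXQ_RPLNSH_THRESH(128) == 32: a quarter of the ring,
+ * capped at one maximum burst.
+ */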
/* Maximum size of burst for vectorized Rx. */
-#define MLX5_VPMD_RX_MAX_BURST MLX5_VPMD_RXQ_RPLNSH_THRESH
+#define MLX5_VPMD_RX_MAX_BURST 64U
/*
* Maximum size of burst for vectorized Tx. This is related to the maximum size
@@ -86,17 +87,31 @@
#define MLX5_LINK_STATUS_TIMEOUT 10
/* Reserved address space for UAR mapping. */
-#define MLX5_UAR_SIZE (1ULL << 32)
+#define MLX5_UAR_SIZE (1ULL << (sizeof(uintptr_t) * 4))
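+/*
+ * sizeof(uintptr_t) * 4 evaluates to 32 on 64-bit targets (4 GB
+ * reserved) and to 16 on 32-bit targets (64 KB reserved), keeping the
+ * reservation proportional to the address space actually available.
+ */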
 /* Offset of reserved UAR address space to hugepage memory. The offset is used
  * to minimize the possibility of an address next to a hugepage being used by
  * other code in either the primary or secondary process; failing to map the
  * Tx UAR would make Tx packets invisible to HW.
  */
-#define MLX5_UAR_OFFSET (1ULL << 32)
+#define MLX5_UAR_OFFSET (1ULL << (sizeof(uintptr_t) * 4))
+
+/* Maximum number of UAR pages used by a port.
+ * These are the size and mask for an array of mutexes used to synchronize
+ * access to a port's UARs on platforms that do not support 64-bit writes.
+ * On such platforms, a 64-bit doorbell can be issued as two consecutive
+ * 32-bit writes. Access to a UAR page (which can be accessible by all
+ * threads in the process) must then be synchronized (for example, using a
+ * semaphore). Such synchronization is not required when ringing doorbells
+ * on different UAR pages.
+ * A port with 512 Tx queues uses 8 UAR pages of 4 KB each, shared among
+ * the ports.
+ */
+#define MLX5_UAR_PAGE_NUM_MAX 64
+#define MLX5_UAR_PAGE_NUM_MASK ((MLX5_UAR_PAGE_NUM_MAX) - 1)
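+/*
+ * A plausible lock-selection sketch, assuming locks are chosen by UAR
+ * page (page_size and db_addr are hypothetical names):
+ *
+ *	idx = ((uintptr_t)db_addr / page_size) & MLX5_UAR_PAGE_NUM_MASK;
+ *	rte_spinlock_lock(&priv->uar_lock[idx]);
+ *	... two 32-bit writes forming one 64-bit doorbell ...
+ *	rte_spinlock_unlock(&priv->uar_lock[idx]);
+ */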
/* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */
-#define MLX5_MPRQ_STRIDE_NUM_N 4U
+#define MLX5_MPRQ_STRIDE_NUM_N 6U
/* Two-byte shift is disabled for Multi-Packet RQ. */
#define MLX5_MPRQ_TWO_BYTE_SHIFT 0
@@ -111,6 +126,11 @@
#define MLX5_MPRQ_MIN_RXQS 12
/* Cache size of mempool for Multi-Packet RQ. */
-#define MLX5_MPRQ_MP_CACHE_SZ 32
+#define MLX5_MPRQ_MP_CACHE_SZ 32U
+
+/* Definition of static_assert found in /usr/include/assert.h */
+#ifndef HAVE_STATIC_ASSERT
+#define static_assert _Static_assert
+#endif
#endif /* RTE_PMD_MLX5_DEFS_H_ */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 90488af3..34c5b95e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -41,6 +41,32 @@
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
+/* Supported speed values found in /usr/include/linux/ethtool.h */
+#ifndef HAVE_SUPPORTED_40000baseKR4_Full
+#define SUPPORTED_40000baseKR4_Full (1 << 23)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseCR4_Full
+#define SUPPORTED_40000baseCR4_Full (1 << 24)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseSR4_Full
+#define SUPPORTED_40000baseSR4_Full (1 << 25)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseLR4_Full
+#define SUPPORTED_40000baseLR4_Full (1 << 26)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseKR4_Full
+#define SUPPORTED_56000baseKR4_Full (1 << 27)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseCR4_Full
+#define SUPPORTED_56000baseCR4_Full (1 << 28)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseSR4_Full
+#define SUPPORTED_56000baseSR4_Full (1 << 29)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseLR4_Full
+#define SUPPORTED_56000baseLR4_Full (1 << 30)
+#endif
+
/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
@@ -93,7 +119,7 @@ struct ethtool_link_settings {
#endif
/**
- * Get interface name from private structure.
+ * Get master interface name from private structure.
*
* @param[in] dev
* Pointer to Ethernet device.
@@ -104,7 +130,8 @@ struct ethtool_link_settings {
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
+mlx5_get_master_ifname(const struct rte_eth_dev *dev,
+ char (*ifname)[IF_NAMESIZE])
{
struct priv *priv = dev->data->dev_private;
DIR *dir;
@@ -166,7 +193,7 @@ try_dev_id:
if (dev_port == dev_port_prev)
goto try_dev_id;
dev_port_prev = dev_port;
- if (dev_port == (priv->port - 1u))
+ if (dev_port == 0)
strlcpy(match, name, sizeof(match));
}
closedir(dir);
@@ -179,30 +206,59 @@ try_dev_id:
}
/**
- * Get the interface index from device name.
+ * Get interface name from private structure.
+ *
+ * This is a port representor-aware version of mlx5_get_master_ifname().
*
* @param[in] dev
* Pointer to Ethernet device.
+ * @param[out] ifname
+ * Interface name output buffer.
*
* @return
- * Interface index on success, a negative errno value otherwise and
- * rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
+mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int ifindex =
+ priv->nl_socket_rdma >= 0 ?
+ mlx5_nl_ifindex(priv->nl_socket_rdma, priv->ibdev_name) : 0;
+
+ if (!ifindex) {
+ if (!priv->representor)
+ return mlx5_get_master_ifname(dev, ifname);
+ rte_errno = ENXIO;
+ return -rte_errno;
+ }
+ if (if_indextoname(ifindex, &(*ifname)[0]))
+ return 0;
+ rte_errno = errno;
+ return -rte_errno;
+}
+
+/**
+ * Get the interface index from device name.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * Nonzero interface index on success, zero otherwise and rte_errno is set.
+ */
+unsigned int
mlx5_ifindex(const struct rte_eth_dev *dev)
{
char ifname[IF_NAMESIZE];
- int ret;
+ unsigned int ifindex;
- ret = mlx5_get_ifname(dev, &ifname);
- if (ret)
- return ret;
- ret = if_nametoindex(ifname);
- if (ret == -1) {
+ if (mlx5_get_ifname(dev, &ifname))
+ return 0;
+ ifindex = if_nametoindex(ifname);
+ if (!ifindex)
rte_errno = errno;
- return -rte_errno;
- }
- return ret;
+ return ifindex;
}
/**
@@ -214,12 +270,16 @@ mlx5_ifindex(const struct rte_eth_dev *dev)
* Request number to pass to ioctl().
* @param[out] ifr
* Interface request structure output buffer.
+ * @param master
+ * When device is a port representor, perform request on master device
+ * instead.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
+mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr,
+ int master)
{
int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
int ret = 0;
@@ -228,7 +288,10 @@ mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
rte_errno = errno;
return -rte_errno;
}
- ret = mlx5_get_ifname(dev, &ifr->ifr_name);
+ if (master)
+ ret = mlx5_get_master_ifname(dev, &ifr->ifr_name);
+ else
+ ret = mlx5_get_ifname(dev, &ifr->ifr_name);
if (ret)
goto error;
ret = ioctl(sock, req, ifr);
@@ -258,7 +321,7 @@ int
mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
struct ifreq request;
- int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);
+ int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request, 0);
if (ret)
return ret;
@@ -282,7 +345,7 @@ mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct ifreq request = { .ifr_mtu = mtu, };
- return mlx5_ifreq(dev, SIOCSIFMTU, &request);
+ return mlx5_ifreq(dev, SIOCSIFMTU, &request, 0);
}
/**
@@ -302,13 +365,13 @@ int
mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
struct ifreq request;
- int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);
+ int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request, 0);
if (ret)
return ret;
request.ifr_flags &= keep;
request.ifr_flags |= flags & ~keep;
- return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
+ return mlx5_ifreq(dev, SIOCSIFFLAGS, &request, 0);
}
/**
@@ -335,15 +398,15 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
- rss_hash_default_key_len)) {
- DRV_LOG(ERR, "port %u RSS key len must be %zu Bytes long",
- dev->data->port_id, rss_hash_default_key_len);
+ MLX5_RSS_HASH_KEY_LEN)) {
+ DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long",
+ dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN));
rte_errno = EINVAL;
return -rte_errno;
}
priv->rss_conf.rss_key =
rte_realloc(priv->rss_conf.rss_key,
- rss_hash_default_key_len, 0);
+ MLX5_RSS_HASH_KEY_LEN, 0);
if (!priv->rss_conf.rss_key) {
DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
dev->data->port_id, rxqs_n);
@@ -354,8 +417,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
use_app_rss_key ?
dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
rss_hash_default_key,
- rss_hash_default_key_len);
- priv->rss_conf.rss_key_len = rss_hash_default_key_len;
+ MLX5_RSS_HASH_KEY_LEN);
+ priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
priv->rxqs = (void *)dev->data->rx_queues;
priv->txqs = (void *)dev->data->tx_queues;
@@ -473,10 +536,34 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
priv->reta_idx_n : config->ind_table_max_size;
- info->hash_key_size = rss_hash_default_key_len;
+ info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
info->speed_capa = priv->link_speed_capa;
info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
mlx5_set_default_params(dev, info);
+ info->switch_info.name = dev->data->name;
+ info->switch_info.domain_id = priv->domain_id;
+ info->switch_info.port_id = priv->representor_id;
+ if (priv->representor) {
+ unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
+ uint16_t port_id[i];
+
+ i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
+ while (i--) {
+ struct priv *opriv =
+ rte_eth_devices[port_id[i]].data->dev_private;
+
+ if (!opriv ||
+ opriv->representor ||
+ opriv->domain_id != priv->domain_id)
+ continue;
+ /*
+ * Override switch name with that of the master
+ * device.
+ */
+ info->switch_info.name = opriv->dev_data->name;
+ break;
+ }
+ }
}
/**
@@ -540,7 +627,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
int link_speed = 0;
int ret;
- ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1);
if (ret) {
DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
dev->data->port_id, strerror(rte_errno));
@@ -550,7 +637,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&edata;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
if (ret) {
DRV_LOG(WARNING,
"port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
@@ -611,7 +698,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
uint64_t sc;
int ret;
- ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1);
if (ret) {
DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
dev->data->port_id, strerror(rte_errno));
@@ -621,7 +708,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&gcmd;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
if (ret) {
DRV_LOG(DEBUG,
"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
@@ -638,7 +725,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
*ecmd = gcmd;
ifr.ifr_data = (void *)ecmd;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
if (ret) {
DRV_LOG(DEBUG,
"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
@@ -801,7 +888,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
int ret;
ifr.ifr_data = (void *)&ethpause;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
if (ret) {
DRV_LOG(WARNING,
"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
@@ -854,7 +941,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 0);
if (ret) {
DRV_LOG(WARNING,
"port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
@@ -1193,3 +1280,93 @@ mlx5_is_removed(struct rte_eth_dev *dev)
return 1;
return 0;
}
+
+/**
+ * Get port ID list of mlx5 instances sharing a common device.
+ *
+ * @param[in] dev
+ * Device to look for.
+ * @param[out] port_list
+ * Result buffer for collected port IDs.
+ * @param port_list_n
+ * Maximum number of entries in result buffer. If 0, @p port_list can be
+ * NULL.
+ *
+ * @return
+ * Number of matching instances regardless of the @p port_list_n
+ * parameter, 0 if none were found.
+ */
+unsigned int
+mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list,
+ unsigned int port_list_n)
+{
+ uint16_t id;
+ unsigned int n = 0;
+
+ RTE_ETH_FOREACH_DEV(id) {
+ struct rte_eth_dev *ldev = &rte_eth_devices[id];
+
+ if (!ldev->device ||
+ !ldev->device->driver ||
+ strcmp(ldev->device->driver->name, MLX5_DRIVER_NAME) ||
+ ldev->device != dev)
+ continue;
+ if (n < port_list_n)
+ port_list[n] = id;
+ n++;
+ }
+ return n;
+}
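A minimal sketch of the intended two-call usage (not part of the patch; example_list_sibling_ports() is a hypothetical caller): probe the count with a NULL buffer first, then fill a buffer of that size, exactly as mlx5_dev_infos_get() does above for representors.

    /* Hypothetical helper; assumes <stdio.h> and the mlx5 headers. */
    static void
    example_list_sibling_ports(const struct rte_device *rdev)
    {
            unsigned int n = mlx5_dev_to_port_id(rdev, NULL, 0);

            if (n == 0)
                    return;
            uint16_t port_id[n];

            /* RTE_MIN() guards against ports added between the calls. */
            n = RTE_MIN(mlx5_dev_to_port_id(rdev, port_id, n), n);
            while (n--)
                    printf("sibling port: %u\n", port_id[n]);
    }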
+
+/**
+ * Get switch information associated with network interface.
+ *
+ * @param ifindex
+ * Network interface index.
+ * @param[out] info
+ * Switch information object, populated in case of success.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
+{
+ char ifname[IF_NAMESIZE];
+ FILE *file;
+ struct mlx5_switch_info data = { .master = 0, };
+ bool port_name_set = false;
+ bool port_switch_id_set = false;
+ char c;
+
+ if (!if_indextoname(ifindex, ifname)) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+
+ MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
+ ifname);
+ MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
+ ifname);
+
+ file = fopen(phys_port_name, "rb");
+ if (file != NULL) {
+ port_name_set =
+ fscanf(file, "%d%c", &data.port_name, &c) == 2 &&
+ c == '\n';
+ fclose(file);
+ }
+ file = fopen(phys_switch_id, "rb");
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ port_switch_id_set =
+ fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
+ c == '\n';
+ fclose(file);
+ data.master = port_switch_id_set && !port_name_set;
+ data.representor = port_switch_id_set && port_name_set;
+ *info = data;
+ return 0;
+}
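The classification above reduces to a small rule: phys_switch_id readable with phys_port_name absent means master (uplink), both readable means representor, and an unreadable phys_switch_id is an error. A standalone sketch of the fscanf() idiom used for phys_switch_id (read_switch_id() is hypothetical, not part of the patch): a hex value must be followed by exactly one trailing newline.

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool
    read_switch_id(const char *ifname, uint64_t *id)
    {
            char path[128];
            FILE *f;
            char c;
            bool ok;

            snprintf(path, sizeof(path),
                     "/sys/class/net/%s/phys_switch_id", ifname);
            f = fopen(path, "rb");
            if (f == NULL)
                    return false;
            /* Same check as above: a hex value, then a lone '\n'. */
            ok = fscanf(f, "%" SCNx64 "%c", id, &c) == 2 && c == '\n';
            fclose(f);
            return ok;
    }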
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 994be05b..ca4625b6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4,6 +4,7 @@
*/
#include <sys/queue.h>
+#include <stdalign.h>
#include <stdint.h>
#include <string.h>
@@ -31,497 +32,289 @@
#include "mlx5_prm.h"
#include "mlx5_glue.h"
-/* Flow priority for control plane flows. */
-#define MLX5_CTRL_FLOW_PRIORITY 1
-
-/* Internet Protocol versions. */
-#define MLX5_IPV4 4
-#define MLX5_IPV6 6
-#define MLX5_GRE 47
-
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-struct ibv_flow_spec_counter_action {
- int dummy;
-};
-#endif
-
/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;
-/** Structure give to the conversion functions. */
-struct mlx5_flow_data {
- struct rte_eth_dev *dev; /** Ethernet device. */
- struct mlx5_flow_parse *parser; /** Parser context. */
- struct rte_flow_error *error; /** Error context. */
-};
-
-static int
-mlx5_flow_create_eth(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data);
-
-static int
-mlx5_flow_create_vlan(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data);
-
-static int
-mlx5_flow_create_ipv4(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data);
-
-static int
-mlx5_flow_create_ipv6(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data);
-
-static int
-mlx5_flow_create_udp(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data);
-
-static int
-mlx5_flow_create_tcp(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data);
-
-static int
-mlx5_flow_create_vxlan(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data);
-
-static int
-mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data);
-
-static int
-mlx5_flow_create_gre(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data);
-
-static int
-mlx5_flow_create_mpls(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data);
-
-struct mlx5_flow_parse;
-
-static void
-mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
- unsigned int size);
-
-static int
-mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id);
-
-static int
-mlx5_flow_create_count(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser);
-
-/* Hash RX queue types. */
-enum hash_rxq_type {
- HASH_RXQ_TCPV4,
- HASH_RXQ_UDPV4,
- HASH_RXQ_IPV4,
- HASH_RXQ_TCPV6,
- HASH_RXQ_UDPV6,
- HASH_RXQ_IPV6,
- HASH_RXQ_ETH,
- HASH_RXQ_TUNNEL,
+/* Pattern outer layer bits. */
+#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
+#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
+#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
+#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
+#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
+#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
+
+/* Pattern inner layer bits. */
+#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
+#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
+#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
+#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
+
+/* Pattern tunnel Layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
+#define MLX5_FLOW_LAYER_GRE (1u << 14)
+#define MLX5_FLOW_LAYER_MPLS (1u << 15)
+
+/* Outer Masks. */
+#define MLX5_FLOW_LAYER_OUTER_L3 \
+ (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+#define MLX5_FLOW_LAYER_OUTER_L4 \
+ (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+#define MLX5_FLOW_LAYER_OUTER \
+ (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
+ MLX5_FLOW_LAYER_OUTER_L4)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL \
+ (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
+
+/* Inner Masks. */
+#define MLX5_FLOW_LAYER_INNER_L3 \
+ (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
+#define MLX5_FLOW_LAYER_INNER_L4 \
+ (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
+#define MLX5_FLOW_LAYER_INNER \
+ (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
+ MLX5_FLOW_LAYER_INNER_L4)
+
+/* Actions that modify the fate of matching traffic. */
+#define MLX5_FLOW_FATE_DROP (1u << 0)
+#define MLX5_FLOW_FATE_QUEUE (1u << 1)
+#define MLX5_FLOW_FATE_RSS (1u << 2)
+
+/* Actions that modify a packet. */
+#define MLX5_FLOW_MOD_FLAG (1u << 0)
+#define MLX5_FLOW_MOD_MARK (1u << 1)
+#define MLX5_FLOW_MOD_COUNT (1u << 2)
+
+/* Possible L3 layer protocols for filtering. */
+#define MLX5_IP_PROTOCOL_TCP 6
+#define MLX5_IP_PROTOCOL_UDP 17
+#define MLX5_IP_PROTOCOL_GRE 47
+#define MLX5_IP_PROTOCOL_MPLS 147
+
+/* Priority reserved for default flows. */
+#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
+
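These bits are OR-ed together while walking a pattern and then tested through the aggregate masks defined above; a short illustration (not from the patch, assumes <assert.h>):

    uint32_t layers = 0;

    layers |= MLX5_FLOW_LAYER_OUTER_L2;      /* eth item seen. */
    layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4; /* ipv4 item seen. */
    layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;  /* udp item seen. */
    assert(layers & MLX5_FLOW_LAYER_OUTER_L3);  /* Some outer L3. */
    assert(!(layers & MLX5_FLOW_LAYER_TUNNEL)); /* Not tunnelled. */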
+enum mlx5_expansion {
+ MLX5_EXPANSION_ROOT,
+ MLX5_EXPANSION_ROOT_OUTER,
+ MLX5_EXPANSION_ROOT_ETH_VLAN,
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
+ MLX5_EXPANSION_OUTER_ETH,
+ MLX5_EXPANSION_OUTER_ETH_VLAN,
+ MLX5_EXPANSION_OUTER_VLAN,
+ MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV4_UDP,
+ MLX5_EXPANSION_OUTER_IPV4_TCP,
+ MLX5_EXPANSION_OUTER_IPV6,
+ MLX5_EXPANSION_OUTER_IPV6_UDP,
+ MLX5_EXPANSION_OUTER_IPV6_TCP,
+ MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE,
+ MLX5_EXPANSION_GRE,
+ MLX5_EXPANSION_MPLS,
+ MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_ETH_VLAN,
+ MLX5_EXPANSION_VLAN,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV4_UDP,
+ MLX5_EXPANSION_IPV4_TCP,
+ MLX5_EXPANSION_IPV6,
+ MLX5_EXPANSION_IPV6_UDP,
+ MLX5_EXPANSION_IPV6_TCP,
};
-/* Initialization data for hash RX queue. */
-struct hash_rxq_init {
- uint64_t hash_fields; /* Fields that participate in the hash. */
- uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
- unsigned int flow_priority; /* Flow priority to use. */
- unsigned int ip_version; /* Internet protocol. */
-};
-
-/* Initialization data for hash RX queues. */
-const struct hash_rxq_init hash_rxq_init[] = {
- [HASH_RXQ_TCPV4] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
- IBV_RX_HASH_DST_IPV4 |
- IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
- .flow_priority = 0,
- .ip_version = MLX5_IPV4,
+/** Supported expansion of items. */
+static const struct rte_flow_expand_node mlx5_support_expansion[] = {
+ [MLX5_EXPANSION_ROOT] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_END,
},
- [HASH_RXQ_UDPV4] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
- IBV_RX_HASH_DST_IPV4 |
- IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
- .flow_priority = 0,
- .ip_version = MLX5_IPV4,
+ [MLX5_EXPANSION_ROOT_OUTER] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
+ MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_END,
},
- [HASH_RXQ_IPV4] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
- IBV_RX_HASH_DST_IPV4),
- .dpdk_rss_hf = (ETH_RSS_IPV4 |
- ETH_RSS_FRAG_IPV4),
- .flow_priority = 1,
- .ip_version = MLX5_IPV4,
+ [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_END,
},
- [HASH_RXQ_TCPV6] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
- IBV_RX_HASH_DST_IPV6 |
- IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
- .flow_priority = 0,
- .ip_version = MLX5_IPV6,
+ [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_END,
},
- [HASH_RXQ_UDPV6] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
- IBV_RX_HASH_DST_IPV6 |
- IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
- .flow_priority = 0,
- .ip_version = MLX5_IPV6,
+ [MLX5_EXPANSION_OUTER_ETH] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV6,
+ MLX5_EXPANSION_MPLS),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .rss_types = 0,
},
- [HASH_RXQ_IPV6] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
- IBV_RX_HASH_DST_IPV6),
- .dpdk_rss_hf = (ETH_RSS_IPV6 |
- ETH_RSS_FRAG_IPV6),
- .flow_priority = 1,
- .ip_version = MLX5_IPV6,
+ [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .rss_types = 0,
},
- [HASH_RXQ_ETH] = {
- .hash_fields = 0,
- .dpdk_rss_hf = 0,
- .flow_priority = 2,
+ [MLX5_EXPANSION_OUTER_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
},
-};
-
-/* Number of entries in hash_rxq_init[]. */
-const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
-
-/** Structure for holding counter stats. */
-struct mlx5_flow_counter_stats {
- uint64_t hits; /**< Number of packets matched by the rule. */
- uint64_t bytes; /**< Number of bytes matched by the rule. */
-};
-
-/** Structure for Drop queue. */
-struct mlx5_hrxq_drop {
- struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
- struct ibv_qp *qp; /**< Verbs queue pair. */
- struct ibv_wq *wq; /**< Verbs work queue. */
- struct ibv_cq *cq; /**< Verbs completion queue. */
-};
-
-/* Flows structures. */
-struct mlx5_flow {
- uint64_t hash_fields; /**< Fields that participate in the hash. */
- struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
- struct ibv_flow *ibv_flow; /**< Verbs flow. */
- struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
-};
-
-/* Drop flows structures. */
-struct mlx5_flow_drop {
- struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
- struct ibv_flow *ibv_flow; /**< Verbs flow. */
-};
-
-struct rte_flow {
- TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- uint32_t mark:1; /**< Set if the flow is marked. */
- uint32_t drop:1; /**< Drop queue. */
- struct rte_flow_action_rss rss_conf; /**< RSS configuration */
- uint16_t (*queues)[]; /**< Queues indexes to use. */
- uint8_t rss_key[40]; /**< copy of the RSS key. */
- uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */
- struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
- struct mlx5_flow_counter_stats counter_stats;/**<The counter stats. */
- struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
- /**< Flow with Rx queue. */
-};
-
-/** Static initializer for items. */
-#define ITEMS(...) \
- (const enum rte_flow_item_type []){ \
- __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
- }
-
-#define IS_TUNNEL(type) ( \
- (type) == RTE_FLOW_ITEM_TYPE_VXLAN || \
- (type) == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || \
- (type) == RTE_FLOW_ITEM_TYPE_GRE || \
- (type) == RTE_FLOW_ITEM_TYPE_MPLS)
-
-const uint32_t flow_ptype[] = {
- [RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
- [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = RTE_PTYPE_TUNNEL_VXLAN_GPE,
- [RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,
- [RTE_FLOW_ITEM_TYPE_MPLS] = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
-};
-
-#define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12)
-
-const uint32_t ptype_ext[] = {
- [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_PTYPE_TUNNEL_VXLAN |
- RTE_PTYPE_L4_UDP,
- [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)] = RTE_PTYPE_TUNNEL_VXLAN_GPE |
- RTE_PTYPE_L4_UDP,
- [PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE,
- [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)] =
- RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
- [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)] =
- RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
-};
-
-/** Structure to generate a simple graph of layers supported by the NIC. */
-struct mlx5_flow_items {
- /** List of possible actions for these items. */
- const enum rte_flow_action_type *const actions;
- /** Bit-masks corresponding to the possibilities for the item. */
- const void *mask;
- /**
- * Default bit-masks to use when item->mask is not provided. When
- * \default_mask is also NULL, the full supported bit-mask (\mask) is
- * used instead.
- */
- const void *default_mask;
- /** Bit-masks size in bytes. */
- const unsigned int mask_sz;
- /**
- * Conversion function from rte_flow to NIC specific flow.
- *
- * @param item
- * rte_flow item to convert.
- * @param default_mask
- * Default bit-masks to use when item->mask is not provided.
- * @param data
- * Internal structure to store the conversion.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is
- * set.
- */
- int (*convert)(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data);
- /** Size in bytes of the destination structure. */
- const unsigned int dst_sz;
- /** List of possible following items. */
- const enum rte_flow_item_type *const items;
-};
-
-/** Valid action for this PMD. */
-static const enum rte_flow_action_type valid_actions[] = {
- RTE_FLOW_ACTION_TYPE_DROP,
- RTE_FLOW_ACTION_TYPE_QUEUE,
- RTE_FLOW_ACTION_TYPE_MARK,
- RTE_FLOW_ACTION_TYPE_FLAG,
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- RTE_FLOW_ACTION_TYPE_COUNT,
-#endif
- RTE_FLOW_ACTION_TYPE_END,
-};
-
-/** Graph of supported items and associated actions. */
-static const struct mlx5_flow_items mlx5_flow_items[] = {
- [RTE_FLOW_ITEM_TYPE_END] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
- RTE_FLOW_ITEM_TYPE_GRE),
+ [MLX5_EXPANSION_OUTER_IPV4] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT
+ (MLX5_EXPANSION_OUTER_IPV4_UDP,
+ MLX5_EXPANSION_OUTER_IPV4_TCP,
+ MLX5_EXPANSION_GRE),
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER,
},
- [RTE_FLOW_ITEM_TYPE_ETH] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_IPV6),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_eth){
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .type = -1,
- },
- .default_mask = &rte_flow_item_eth_mask,
- .mask_sz = sizeof(struct rte_flow_item_eth),
- .convert = mlx5_flow_create_eth,
- .dst_sz = sizeof(struct ibv_flow_spec_eth),
+ [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE),
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
},
- [RTE_FLOW_ITEM_TYPE_VLAN] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_IPV6),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_vlan){
- .tci = -1,
- .inner_type = -1,
- },
- .default_mask = &rte_flow_item_vlan_mask,
- .mask_sz = sizeof(struct rte_flow_item_vlan),
- .convert = mlx5_flow_create_vlan,
- .dst_sz = 0,
+ [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
},
- [RTE_FLOW_ITEM_TYPE_IPV4] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_GRE),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_ipv4){
- .hdr = {
- .src_addr = -1,
- .dst_addr = -1,
- .type_of_service = -1,
- .next_proto_id = -1,
- },
- },
- .default_mask = &rte_flow_item_ipv4_mask,
- .mask_sz = sizeof(struct rte_flow_item_ipv4),
- .convert = mlx5_flow_create_ipv4,
- .dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext),
+ [MLX5_EXPANSION_OUTER_IPV6] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT
+ (MLX5_EXPANSION_OUTER_IPV6_UDP,
+ MLX5_EXPANSION_OUTER_IPV6_TCP),
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER,
},
- [RTE_FLOW_ITEM_TYPE_IPV6] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_GRE),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_ipv6){
- .hdr = {
- .src_addr = {
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- },
- .dst_addr = {
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- },
- .vtc_flow = -1,
- .proto = -1,
- .hop_limits = -1,
- },
- },
- .default_mask = &rte_flow_item_ipv6_mask,
- .mask_sz = sizeof(struct rte_flow_item_ipv6),
- .convert = mlx5_flow_create_ipv6,
- .dst_sz = sizeof(struct ibv_flow_spec_ipv6),
+ [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE),
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
},
- [RTE_FLOW_ITEM_TYPE_UDP] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN,
- RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
- RTE_FLOW_ITEM_TYPE_MPLS),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_udp){
- .hdr = {
- .src_port = -1,
- .dst_port = -1,
- },
- },
- .default_mask = &rte_flow_item_udp_mask,
- .mask_sz = sizeof(struct rte_flow_item_udp),
- .convert = mlx5_flow_create_udp,
- .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
+ [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
},
- [RTE_FLOW_ITEM_TYPE_TCP] = {
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_tcp){
- .hdr = {
- .src_port = -1,
- .dst_port = -1,
- },
- },
- .default_mask = &rte_flow_item_tcp_mask,
- .mask_sz = sizeof(struct rte_flow_item_tcp),
- .convert = mlx5_flow_create_tcp,
- .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
+ [MLX5_EXPANSION_VXLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
},
- [RTE_FLOW_ITEM_TYPE_GRE] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_IPV6,
- RTE_FLOW_ITEM_TYPE_MPLS),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_gre){
- .protocol = -1,
- },
- .default_mask = &rte_flow_item_gre_mask,
- .mask_sz = sizeof(struct rte_flow_item_gre),
- .convert = mlx5_flow_create_gre,
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- .dst_sz = sizeof(struct ibv_flow_spec_gre),
-#else
- .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
-#endif
+ [MLX5_EXPANSION_VXLAN_GPE] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
},
- [RTE_FLOW_ITEM_TYPE_MPLS] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_IPV6),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_mpls){
- .label_tc_s = "\xff\xff\xf0",
- },
- .default_mask = &rte_flow_item_mpls_mask,
- .mask_sz = sizeof(struct rte_flow_item_mpls),
- .convert = mlx5_flow_create_mpls,
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- .dst_sz = sizeof(struct ibv_flow_spec_mpls),
-#endif
+ [MLX5_EXPANSION_GRE] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
+ .type = RTE_FLOW_ITEM_TYPE_GRE,
},
- [RTE_FLOW_ITEM_TYPE_VXLAN] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4, /* For L3 VXLAN. */
- RTE_FLOW_ITEM_TYPE_IPV6), /* For L3 VXLAN. */
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_vxlan){
- .vni = "\xff\xff\xff",
- },
- .default_mask = &rte_flow_item_vxlan_mask,
- .mask_sz = sizeof(struct rte_flow_item_vxlan),
- .convert = mlx5_flow_create_vxlan,
- .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
+ [MLX5_EXPANSION_MPLS] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_MPLS,
},
- [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_IPV6),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_vxlan_gpe){
- .vni = "\xff\xff\xff",
- },
- .default_mask = &rte_flow_item_vxlan_gpe_mask,
- .mask_sz = sizeof(struct rte_flow_item_vxlan_gpe),
- .convert = mlx5_flow_create_vxlan_gpe,
- .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
+ [MLX5_EXPANSION_ETH] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ },
+ [MLX5_EXPANSION_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ },
+ [MLX5_EXPANSION_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ },
+ [MLX5_EXPANSION_IPV4] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
+ MLX5_EXPANSION_IPV4_TCP),
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER,
+ },
+ [MLX5_EXPANSION_IPV4_UDP] = {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+ },
+ [MLX5_EXPANSION_IPV4_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+ },
+ [MLX5_EXPANSION_IPV6] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
+ MLX5_EXPANSION_IPV6_TCP),
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER,
+ },
+ [MLX5_EXPANSION_IPV6_UDP] = {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+ },
+ [MLX5_EXPANSION_IPV6_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
},
};
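Worked example of how this graph is read (an illustration; the exact output of the RSS expansion helper may differ): starting at MLX5_EXPANSION_ROOT, a user pattern "eth / end" with rss.types = ETH_RSS_NONFRAG_IPV4_UDP follows the .next links ETH -> {IPV4, IPV6} -> {UDP, TCP} and yields, among others:

    eth / end
    eth / ipv4 / end
    eth / ipv4 / udp / end

Each generated pattern is characterized by the .rss_types of its last node, which tells the driver which ETH_RSS_* bits that particular Verbs flow can hash on, so a single rte_flow rule can spawn one Verbs flow per relevant hash type.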
-/** Structure to pass to the conversion function. */
-struct mlx5_flow_parse {
- uint32_t inner; /**< Verbs value, set once tunnel is encountered. */
- uint32_t create:1;
- /**< Whether resources should remain after a validate. */
- uint32_t drop:1; /**< Target is a drop queue. */
- uint32_t mark:1; /**< Mark is present in the flow. */
- uint32_t count:1; /**< Count is present in the flow. */
- uint32_t mark_id; /**< Mark identifier. */
- struct rte_flow_action_rss rss_conf; /**< RSS configuration */
- uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
- uint8_t rss_key[40]; /**< copy of the RSS key. */
- enum hash_rxq_type layer; /**< Last pattern layer detected. */
- enum hash_rxq_type out_layer; /**< Last outer pattern layer detected. */
- uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */
- struct ibv_counter_set *cs; /**< Holds the counter set for the rule */
+/** Verbs flow specification and attached resources. */
+struct mlx5_flow_verbs {
+ LIST_ENTRY(mlx5_flow_verbs) next;
+ unsigned int size; /**< Size of the attribute. */
struct {
- struct ibv_flow_attr *ibv_attr;
- /**< Pointer to Verbs attributes. */
- unsigned int offset;
- /**< Current position or total size of the attribute. */
- uint64_t hash_fields; /**< Verbs hash fields. */
- } queue[RTE_DIM(hash_rxq_init)];
+ struct ibv_flow_attr *attr;
+ /**< Pointer to the Verbs flow attributes. */
+ uint8_t *specs; /**< Pointer to the specifications. */
+ };
+ struct ibv_flow *flow; /**< Verbs flow pointer. */
+ struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+ uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+};
+
+/* Flow counter information. */
+struct mlx5_flow_counter {
+ LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */
+ uint32_t shared:1; /**< Share counter ID with other flow rules. */
+ uint32_t ref_cnt:31; /**< Reference counter. */
+ uint32_t id; /**< Counter ID. */
+ struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
+ uint64_t hits; /**< Number of packets matched by the rule. */
+ uint64_t bytes; /**< Number of bytes matched by the rule. */
+};
+
+/* Flow structure. */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+ struct rte_flow_attr attributes; /**< User flow attribute. */
+ uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
+ uint32_t layers;
+ /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
+ uint32_t modifier;
+ /**< Bit-fields of present modifier see MLX5_FLOW_MOD_*. */
+ uint32_t fate;
+ /**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
+ uint8_t l3_protocol; /**< valid when l3_protocol_en is set. */
+ LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
+ struct mlx5_flow_verbs *cur_verbs;
+ /**< Current Verbs flow structure being filled. */
+ struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */
+ struct rte_flow_action_rss rss; /**< RSS context. */
+ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+ uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+ void *nl_flow; /**< Netlink flow buffer if relevant. */
};
static const struct rte_flow_ops mlx5_flow_ops = {
@@ -529,12 +322,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.create = mlx5_flow_create,
.destroy = mlx5_flow_destroy,
.flush = mlx5_flow_flush,
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- .query = mlx5_flow_query,
-#else
- .query = NULL,
-#endif
.isolate = mlx5_flow_isolate,
+ .query = mlx5_flow_query,
};
/* Convert FDIR request to Generic flow. */
@@ -569,883 +358,459 @@ struct ibv_spec_header {
uint16_t size;
};
-/**
- * Check item is fully supported by the NIC matching capability.
- *
- * @param item[in]
- * Item specification.
- * @param mask[in]
- * Bit-masks covering supported fields to compare with spec, last and mask in
- * \item.
- * @param size
- * Bit-Mask size in bytes.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+/*
+ * Number of sub priorities.
+ * For each kind of pattern matching, i.e. L2, L3 and L4, to get correct
+ * matching on the NIC (firmware dependent), L4 must have the highest
+ * priority, followed by L3 and finally L2.
*/
-static int
-mlx5_flow_item_validate(const struct rte_flow_item *item,
- const uint8_t *mask, unsigned int size)
-{
- unsigned int i;
- const uint8_t *spec = item->spec;
- const uint8_t *last = item->last;
- const uint8_t *m = item->mask ? item->mask : mask;
+#define MLX5_PRIORITY_MAP_L2 2
+#define MLX5_PRIORITY_MAP_L3 1
+#define MLX5_PRIORITY_MAP_L4 0
+#define MLX5_PRIORITY_MAP_MAX 3
+
+/* Map of Verbs to Flow priority with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+ { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
+};
- if (!spec && (item->mask || last))
- goto error;
- if (!spec)
- return 0;
- /*
- * Single-pass check to make sure that:
- * - item->mask is supported, no bits are set outside mask.
- * - Both masked item->spec and item->last are equal (no range
- * supported).
- */
- for (i = 0; i < size; i++) {
- if (!m[i])
- continue;
- if ((m[i] | mask[i]) != mask[i])
- goto error;
- if (last && ((spec[i] & m[i]) != (last[i] & m[i])))
- goto error;
- }
- return 0;
-error:
- rte_errno = ENOTSUP;
- return -rte_errno;
-}
+/* Map of Verbs to Flow priority with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+ { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+ { 9, 10, 11 }, { 12, 13, 14 },
+};
+
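Worked example of the two maps (illustration only): with 16 Verbs priorities the driver selects priority_map_5; a user flow at priority 1 that matches up to L3 takes subpriority MLX5_PRIORITY_MAP_L3 == 1 and lands on Verbs priority priority_map_5[1][1] == 4, one step below an L4 match at the same user priority (priority_map_5[1][0] == 3, and lower Verbs values match first).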
+/* Tunnel information. */
+struct mlx5_flow_tunnel_info {
+ uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
+ uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
+};
+
+static struct mlx5_flow_tunnel_info tunnels_info[] = {
+ {
+ .tunnel = MLX5_FLOW_LAYER_VXLAN,
+ .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
+ .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_GRE,
+ .ptype = RTE_PTYPE_TUNNEL_GRE,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
+ .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_MPLS,
+ .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
+ },
+};
/**
- * Extract attribute to the parser.
+ * Discover the maximum number of flow priorities available.
*
- * @param[in] attr
- * Flow rule attributes.
- * @param[out] error
- * Perform verbose error reporting if not NULL.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * number of supported flow priorities on success, a negative errno
+ * value otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_convert_attributes(const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
+int
+mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
- if (attr->group) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- NULL,
- "groups are not supported");
- return -rte_errno;
- }
- if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- NULL,
- "priorities are not supported");
- return -rte_errno;
- }
- if (attr->egress) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- NULL,
- "egress is not supported");
+ struct {
+ struct ibv_flow_attr attr;
+ struct ibv_flow_spec_eth eth;
+ struct ibv_flow_spec_action_drop drop;
+ } flow_attr = {
+ .attr = {
+ .num_of_specs = 2,
+ },
+ .eth = {
+ .type = IBV_FLOW_SPEC_ETH,
+ .size = sizeof(struct ibv_flow_spec_eth),
+ },
+ .drop = {
+ .size = sizeof(struct ibv_flow_spec_action_drop),
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ },
+ };
+ struct ibv_flow *flow;
+ struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
+ uint16_t vprio[] = { 8, 16 };
+ int i;
+ int priority = 0;
+
+ if (!drop) {
+ rte_errno = ENOTSUP;
return -rte_errno;
}
- if (attr->transfer) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
- NULL,
- "transfer is not supported");
- return -rte_errno;
+ for (i = 0; i != RTE_DIM(vprio); i++) {
+ flow_attr.attr.priority = vprio[i] - 1;
+ flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
+ if (!flow)
+ break;
+ claim_zero(mlx5_glue->destroy_flow(flow));
+ priority = vprio[i];
}
- if (!attr->ingress) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "only ingress is supported");
+ switch (priority) {
+ case 8:
+ priority = RTE_DIM(priority_map_3);
+ break;
+ case 16:
+ priority = RTE_DIM(priority_map_5);
+ break;
+ default:
+ rte_errno = ENOTSUP;
+ DRV_LOG(ERR,
+ "port %u verbs maximum priority: %d expected 8/16",
+ dev->data->port_id, vprio[i]);
return -rte_errno;
}
- return 0;
+ mlx5_hrxq_drop_release(dev);
+ DRV_LOG(INFO, "port %u flow maximum priority: %d",
+ dev->data->port_id, priority);
+ return priority;
}
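Probe trace (illustration): on a device exposing 16 Verbs priorities, both create_flow() probes, at attribute priority 7 and then 15, succeed, so the loop ends with priority == 16 and the function reports RTE_DIM(priority_map_5) == 5 usable flow priorities; if only 8 exist, the second probe fails and 3 levels are reported instead.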
/**
- * Extract actions request to the parser.
+ * Adjust flow priority.
*
* @param dev
* Pointer to Ethernet device.
- * @param[in] actions
- * Associated actions (list terminated by the END action).
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- * @param[in, out] parser
- * Internal parser structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * @param flow
+ * Pointer to an rte flow.
*/
-static int
-mlx5_flow_convert_actions(struct rte_eth_dev *dev,
- const struct rte_flow_action actions[],
- struct rte_flow_error *error,
- struct mlx5_flow_parse *parser)
+static void
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- enum { FATE = 1, MARK = 2, COUNT = 4, };
- uint32_t overlap = 0;
struct priv *priv = dev->data->dev_private;
+ uint32_t priority = flow->attributes.priority;
+ uint32_t subpriority = flow->cur_verbs->attr->priority;
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
- continue;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
- if (overlap & FATE)
- goto exit_action_overlap;
- overlap |= FATE;
- parser->drop = 1;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- const struct rte_flow_action_queue *queue =
- (const struct rte_flow_action_queue *)
- actions->conf;
-
- if (overlap & FATE)
- goto exit_action_overlap;
- overlap |= FATE;
- if (!queue || (queue->index > (priv->rxqs_n - 1)))
- goto exit_action_not_supported;
- parser->queues[0] = queue->index;
- parser->rss_conf = (struct rte_flow_action_rss){
- .queue_num = 1,
- .queue = parser->queues,
- };
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
- const struct rte_flow_action_rss *rss =
- (const struct rte_flow_action_rss *)
- actions->conf;
- const uint8_t *rss_key;
- uint32_t rss_key_len;
- uint16_t n;
-
- if (overlap & FATE)
- goto exit_action_overlap;
- overlap |= FATE;
- if (rss->func &&
- rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "the only supported RSS hash"
- " function is Toeplitz");
- return -rte_errno;
- }
-#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (parser->rss_conf.level > 1) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "a nonzero RSS encapsulation"
- " level is not supported");
- return -rte_errno;
- }
-#endif
- if (parser->rss_conf.level > 2) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "RSS encapsulation level"
- " > 1 is not supported");
- return -rte_errno;
- }
- if (rss->types & MLX5_RSS_HF_MASK) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "unsupported RSS type"
- " requested");
- return -rte_errno;
- }
- if (rss->key_len) {
- rss_key_len = rss->key_len;
- rss_key = rss->key;
- } else {
- rss_key_len = rss_hash_default_key_len;
- rss_key = rss_hash_default_key;
- }
- if (rss_key_len != RTE_DIM(parser->rss_key)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "RSS hash key must be"
- " exactly 40 bytes long");
- return -rte_errno;
- }
- if (!rss->queue_num) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "no valid queues");
- return -rte_errno;
- }
- if (rss->queue_num > RTE_DIM(parser->queues)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "too many queues for RSS"
- " context");
- return -rte_errno;
- }
- for (n = 0; n < rss->queue_num; ++n) {
- if (rss->queue[n] >= priv->rxqs_n) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "queue id > number of"
- " queues");
- return -rte_errno;
- }
- }
- parser->rss_conf = (struct rte_flow_action_rss){
- .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
- .level = rss->level ? rss->level : 1,
- .types = rss->types,
- .key_len = rss_key_len,
- .queue_num = rss->queue_num,
- .key = memcpy(parser->rss_key, rss_key,
- sizeof(*rss_key) * rss_key_len),
- .queue = memcpy(parser->queues, rss->queue,
- sizeof(*rss->queue) *
- rss->queue_num),
- };
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
- const struct rte_flow_action_mark *mark =
- (const struct rte_flow_action_mark *)
- actions->conf;
-
- if (overlap & MARK)
- goto exit_action_overlap;
- overlap |= MARK;
- if (!mark) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "mark must be defined");
- return -rte_errno;
- } else if (mark->id >= MLX5_FLOW_MARK_MAX) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "mark must be between 0"
- " and 16777199");
- return -rte_errno;
- }
- parser->mark = 1;
- parser->mark_id = mark->id;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
- if (overlap & MARK)
- goto exit_action_overlap;
- overlap |= MARK;
- parser->mark = 1;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
- priv->config.flow_counter_en) {
- if (overlap & COUNT)
- goto exit_action_overlap;
- overlap |= COUNT;
- parser->count = 1;
- } else {
- goto exit_action_not_supported;
- }
- }
- /* When fate is unknown, drop traffic. */
- if (!(overlap & FATE))
- parser->drop = 1;
- if (parser->drop && parser->mark)
- parser->mark = 0;
- if (!parser->rss_conf.queue_num && !parser->drop) {
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "no valid action");
- return -rte_errno;
+ switch (priv->config.flow_prio) {
+ case RTE_DIM(priority_map_3):
+ priority = priority_map_3[priority][subpriority];
+ break;
+ case RTE_DIM(priority_map_5):
+ priority = priority_map_5[priority][subpriority];
+ break;
}
- return 0;
-exit_action_not_supported:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "action not supported");
- return -rte_errno;
-exit_action_overlap:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "overlapping actions are not supported");
- return -rte_errno;
+ flow->cur_verbs->attr->priority = priority;
}
/**
- * Validate items.
+ * Get a flow counter.
*
- * @param[in] items
- * Pattern specification (list terminated by the END pattern item).
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- * @param[in, out] parser
- * Internal parser structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] shared
+ * Indicate if this counter is shared with other flows.
+ * @param[in] id
+ * Counter identifier.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * A pointer to the counter, NULL otherwise and rte_errno is set.
*/
-static int
-mlx5_flow_convert_items_validate(struct rte_eth_dev *dev,
- const struct rte_flow_item items[],
- struct rte_flow_error *error,
- struct mlx5_flow_parse *parser)
+static struct mlx5_flow_counter *
+mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
struct priv *priv = dev->data->dev_private;
- const struct mlx5_flow_items *cur_item = mlx5_flow_items;
- unsigned int i;
- unsigned int last_voids = 0;
- int ret = 0;
-
- /* Initialise the offsets to start after verbs attribute. */
- for (i = 0; i != hash_rxq_init_n; ++i)
- parser->queue[i].offset = sizeof(struct ibv_flow_attr);
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
- const struct mlx5_flow_items *token = NULL;
- unsigned int n;
+ struct mlx5_flow_counter *cnt;
- if (items->type == RTE_FLOW_ITEM_TYPE_VOID) {
- last_voids++;
+ LIST_FOREACH(cnt, &priv->flow_counters, next) {
+ if (!cnt->shared || cnt->shared != shared)
continue;
- }
- for (i = 0;
- cur_item->items &&
- cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
- ++i) {
- if (cur_item->items[i] == items->type) {
- token = &mlx5_flow_items[items->type];
- break;
- }
- }
- if (!token) {
- ret = -ENOTSUP;
- goto exit_item_not_supported;
- }
- cur_item = token;
- ret = mlx5_flow_item_validate(items,
- (const uint8_t *)cur_item->mask,
- cur_item->mask_sz);
- if (ret)
- goto exit_item_not_supported;
- if (IS_TUNNEL(items->type)) {
- if (parser->tunnel &&
- !((items - last_voids - 1)->type ==
- RTE_FLOW_ITEM_TYPE_GRE && items->type ==
- RTE_FLOW_ITEM_TYPE_MPLS)) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "Cannot recognize multiple"
- " tunnel encapsulations.");
- return -rte_errno;
- }
- if (items->type == RTE_FLOW_ITEM_TYPE_MPLS &&
- !priv->config.mpls_en) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "MPLS not supported or"
- " disabled in firmware"
- " configuration.");
- return -rte_errno;
- }
- if (!priv->config.tunnel_en &&
- parser->rss_conf.level > 1) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "RSS on tunnel is not supported");
- return -rte_errno;
- }
- parser->inner = IBV_FLOW_SPEC_INNER;
- parser->tunnel = flow_ptype[items->type];
- }
- if (parser->drop) {
- parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;
- } else {
- for (n = 0; n != hash_rxq_init_n; ++n)
- parser->queue[n].offset += cur_item->dst_sz;
- }
- last_voids = 0;
- }
- if (parser->drop) {
- parser->queue[HASH_RXQ_ETH].offset +=
- sizeof(struct ibv_flow_spec_action_drop);
- }
- if (parser->mark) {
- for (i = 0; i != hash_rxq_init_n; ++i)
- parser->queue[i].offset +=
- sizeof(struct ibv_flow_spec_action_tag);
- }
- if (parser->count) {
- unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
-
- for (i = 0; i != hash_rxq_init_n; ++i)
- parser->queue[i].offset += size;
+ if (cnt->id != id)
+ continue;
+ cnt->ref_cnt++;
+ return cnt;
}
- return 0;
-exit_item_not_supported:
- return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,
- items, "item not supported");
-}
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-/**
- * Allocate memory space to store verbs flow attributes.
- *
- * @param[in] size
- * Amount of byte to allocate.
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- *
- * @return
- * A verbs flow attribute on success, NULL otherwise and rte_errno is set.
- */
-static struct ibv_flow_attr *
-mlx5_flow_convert_allocate(unsigned int size, struct rte_flow_error *error)
-{
- struct ibv_flow_attr *ibv_attr;
+ struct mlx5_flow_counter tmpl = {
+ .shared = shared,
+ .id = id,
+ .ref_cnt = 1, /* The creating flow holds the first reference. */
+ .cs = mlx5_glue->create_counter_set
+ (priv->ctx,
+ &(struct ibv_counter_set_init_attr){
+ .counter_set_id = id,
+ }),
+ .hits = 0,
+ .bytes = 0,
+ };
- ibv_attr = rte_calloc(__func__, 1, size, 0);
- if (!ibv_attr) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot allocate verbs spec attributes");
+ if (!tmpl.cs) {
+ rte_errno = errno;
return NULL;
}
- return ibv_attr;
-}
-
-/**
- * Make inner packet matching with an higher priority from the non Inner
- * matching.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param[in, out] parser
- * Internal parser structure.
- * @param attr
- * User flow attribute.
- */
-static void
-mlx5_flow_update_priority(struct rte_eth_dev *dev,
- struct mlx5_flow_parse *parser,
- const struct rte_flow_attr *attr)
-{
- struct priv *priv = dev->data->dev_private;
- unsigned int i;
- uint16_t priority;
-
- /* 8 priorities >= 16 priorities
- * Control flow: 4-7 8-15
- * User normal flow: 1-3 4-7
- * User tunnel flow: 0-2 0-3
- */
- priority = attr->priority * MLX5_VERBS_FLOW_PRIO_8;
- if (priv->config.max_verbs_prio == MLX5_VERBS_FLOW_PRIO_8)
- priority /= 2;
- /*
- * Lower non-tunnel flow Verbs priority 1 if only support 8 Verbs
- * priorities, lower 4 otherwise.
- */
- if (!parser->inner) {
- if (priv->config.max_verbs_prio == MLX5_VERBS_FLOW_PRIO_8)
- priority += 1;
- else
- priority += MLX5_VERBS_FLOW_PRIO_8 / 2;
- }
- if (parser->drop) {
- parser->queue[HASH_RXQ_ETH].ibv_attr->priority = priority +
- hash_rxq_init[HASH_RXQ_ETH].flow_priority;
- return;
- }
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!parser->queue[i].ibv_attr)
- continue;
- parser->queue[i].ibv_attr->priority = priority +
- hash_rxq_init[i].flow_priority;
+ cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+ if (!cnt) {
+ rte_errno = ENOMEM;
+ return NULL;
}
+ *cnt = tmpl;
+ LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+ return cnt;
+#endif
+ rte_errno = ENOTSUP;
+ return NULL;
}
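A minimal usage sketch (hypothetical caller; assumes counter support is compiled in): two rules sharing a counter ID obtain the same object, the second call merely taking a reference.

    struct mlx5_flow_counter *a = mlx5_flow_counter_new(dev, 1, 42);
    struct mlx5_flow_counter *b = mlx5_flow_counter_new(dev, 1, 42);

    /* Shared ID 42: b == a, only the reference count was bumped. */
    mlx5_flow_counter_release(b);
    mlx5_flow_counter_release(a); /* Last reference destroys the set. */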
/**
- * Finalise verbs flow attributes.
+ * Release a flow counter.
*
- * @param[in, out] parser
- * Internal parser structure.
+ * @param[in] counter
+ * Pointer to the counter handle.
*/
static void
-mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser)
+mlx5_flow_counter_release(struct mlx5_flow_counter *counter)
{
- unsigned int i;
- uint32_t inner = parser->inner;
-
- /* Don't create extra flows for outer RSS. */
- if (parser->tunnel && parser->rss_conf.level < 2)
- return;
- /*
- * Fill missing layers in verbs specifications, or compute the correct
- * offset to allocate the memory space for the attributes and
- * specifications.
- */
- for (i = 0; i != hash_rxq_init_n - 1; ++i) {
- union {
- struct ibv_flow_spec_ipv4_ext ipv4;
- struct ibv_flow_spec_ipv6 ipv6;
- struct ibv_flow_spec_tcp_udp udp_tcp;
- struct ibv_flow_spec_eth eth;
- } specs;
- void *dst;
- uint16_t size;
-
- if (i == parser->layer)
- continue;
- if (parser->layer == HASH_RXQ_ETH ||
- parser->layer == HASH_RXQ_TUNNEL) {
- if (hash_rxq_init[i].ip_version == MLX5_IPV4) {
- size = sizeof(struct ibv_flow_spec_ipv4_ext);
- specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){
- .type = inner | IBV_FLOW_SPEC_IPV4_EXT,
- .size = size,
- };
- } else {
- size = sizeof(struct ibv_flow_spec_ipv6);
- specs.ipv6 = (struct ibv_flow_spec_ipv6){
- .type = inner | IBV_FLOW_SPEC_IPV6,
- .size = size,
- };
- }
- if (parser->queue[i].ibv_attr) {
- dst = (void *)((uintptr_t)
- parser->queue[i].ibv_attr +
- parser->queue[i].offset);
- memcpy(dst, &specs, size);
- ++parser->queue[i].ibv_attr->num_of_specs;
- }
- parser->queue[i].offset += size;
- }
- if ((i == HASH_RXQ_UDPV4) || (i == HASH_RXQ_TCPV4) ||
- (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) {
- size = sizeof(struct ibv_flow_spec_tcp_udp);
- specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) {
- .type = inner | ((i == HASH_RXQ_UDPV4 ||
- i == HASH_RXQ_UDPV6) ?
- IBV_FLOW_SPEC_UDP :
- IBV_FLOW_SPEC_TCP),
- .size = size,
- };
- if (parser->queue[i].ibv_attr) {
- dst = (void *)((uintptr_t)
- parser->queue[i].ibv_attr +
- parser->queue[i].offset);
- memcpy(dst, &specs, size);
- ++parser->queue[i].ibv_attr->num_of_specs;
- }
- parser->queue[i].offset += size;
- }
+ if (--counter->ref_cnt == 0) {
+ claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
+ LIST_REMOVE(counter, next);
+ rte_free(counter);
}
}
/**
- * Update flows according to pattern and RSS hash fields.
+ * Verify the @p attributes will be correctly understood by the NIC and store
+ * them in the @p flow if everything is correct.
*
- * @param[in, out] parser
- * Internal parser structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] attributes
+ * Pointer to flow attributes
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_flow_convert_rss(struct mlx5_flow_parse *parser)
+mlx5_flow_attributes(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attributes,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
{
- unsigned int i;
- enum hash_rxq_type start;
- enum hash_rxq_type layer;
- int outer = parser->tunnel && parser->rss_conf.level < 2;
- uint64_t rss = parser->rss_conf.types;
-
- layer = outer ? parser->out_layer : parser->layer;
- if (layer == HASH_RXQ_TUNNEL)
- layer = HASH_RXQ_ETH;
- if (outer) {
- /* Only one hash type for outer RSS. */
- if (rss && layer == HASH_RXQ_ETH) {
- start = HASH_RXQ_TCPV4;
- } else if (rss && layer != HASH_RXQ_ETH &&
- !(rss & hash_rxq_init[layer].dpdk_rss_hf)) {
- /* If RSS not match L4 pattern, try L3 RSS. */
- if (layer < HASH_RXQ_IPV4)
- layer = HASH_RXQ_IPV4;
- else if (layer > HASH_RXQ_IPV4 && layer < HASH_RXQ_IPV6)
- layer = HASH_RXQ_IPV6;
- start = layer;
- } else {
- start = layer;
- }
- /* Scan first valid hash type. */
- for (i = start; rss && i <= layer; ++i) {
- if (!parser->queue[i].ibv_attr)
- continue;
- if (hash_rxq_init[i].dpdk_rss_hf & rss)
- break;
- }
- if (rss && i <= layer)
- parser->queue[layer].hash_fields =
- hash_rxq_init[i].hash_fields;
- /* Trim unused hash types. */
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (parser->queue[i].ibv_attr && i != layer) {
- rte_free(parser->queue[i].ibv_attr);
- parser->queue[i].ibv_attr = NULL;
- }
- }
- } else {
- /* Expand for inner or normal RSS. */
- if (rss && (layer == HASH_RXQ_ETH || layer == HASH_RXQ_IPV4))
- start = HASH_RXQ_TCPV4;
- else if (rss && layer == HASH_RXQ_IPV6)
- start = HASH_RXQ_TCPV6;
- else
- start = layer;
- /* For L4 pattern, try L3 RSS if no L4 RSS. */
- /* Trim unused hash types. */
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!parser->queue[i].ibv_attr)
- continue;
- if (i < start || i > layer) {
- rte_free(parser->queue[i].ibv_attr);
- parser->queue[i].ibv_attr = NULL;
- continue;
- }
- if (!rss)
- continue;
- if (hash_rxq_init[i].dpdk_rss_hf & rss) {
- parser->queue[i].hash_fields =
- hash_rxq_init[i].hash_fields;
- } else if (i != layer) {
- /* Remove unused RSS expansion. */
- rte_free(parser->queue[i].ibv_attr);
- parser->queue[i].ibv_attr = NULL;
- } else if (layer < HASH_RXQ_IPV4 &&
- (hash_rxq_init[HASH_RXQ_IPV4].dpdk_rss_hf &
- rss)) {
- /* Allow IPv4 RSS on L4 pattern. */
- parser->queue[i].hash_fields =
- hash_rxq_init[HASH_RXQ_IPV4]
- .hash_fields;
- } else if (i > HASH_RXQ_IPV4 && i < HASH_RXQ_IPV6 &&
- (hash_rxq_init[HASH_RXQ_IPV6].dpdk_rss_hf &
- rss)) {
- /* Allow IPv4 RSS on L4 pattern. */
- parser->queue[i].hash_fields =
- hash_rxq_init[HASH_RXQ_IPV6]
- .hash_fields;
- }
- }
- }
+ uint32_t priority_max =
+ ((struct priv *)dev->data->dev_private)->config.flow_prio - 1;
+
+ if (attributes->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups is not supported");
+ if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ attributes->priority >= priority_max)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priority out of range");
+ if (attributes->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ if (attributes->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ if (!attributes->ingress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "ingress attribute is mandatory");
+ flow->attributes = *attributes;
+ if (attributes->priority == MLX5_FLOW_PRIO_RSVD)
+ flow->attributes.priority = priority_max;
return 0;
}
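Illustration of the reserved priority: with config.flow_prio == 5, priority_max == 4; an attribute of { .ingress = 1, .priority = MLX5_FLOW_PRIO_RSVD } is accepted and stored as priority 4 (the lowest precedence level), while an explicit .priority = 4 is rejected as out of range since user flows may only use 0..3.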
/**
- * Validate and convert a flow supported by the NIC.
+ * Verify the @p item specifications (spec, last, mask) are compatible with the
+ * NIC capabilities.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param[in] attr
- * Flow rule attributes.
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END action).
+ * @param[in] item
+ * Item specification.
+ * @param[in] mask
+ * @p item->mask or flow default bit-masks.
+ * @param[in] nic_mask
+ * Bit-masks covering supported fields by the NIC to compare with user mask.
+ * @param[in] size
+ * Bit-masks size in bytes.
* @param[out] error
- * Perform verbose error reporting if not NULL.
- * @param[in, out] parser
- * Internal parser structure.
+ * Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_flow_convert(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error,
- struct mlx5_flow_parse *parser)
+mlx5_flow_item_acceptable(const struct rte_flow_item *item,
+ const uint8_t *mask,
+ const uint8_t *nic_mask,
+ unsigned int size,
+ struct rte_flow_error *error)
{
- const struct mlx5_flow_items *cur_item = mlx5_flow_items;
unsigned int i;
- int ret;
- /* First step. Validate the attributes, items and actions. */
- *parser = (struct mlx5_flow_parse){
- .create = parser->create,
- .layer = HASH_RXQ_ETH,
- .mark_id = MLX5_FLOW_MARK_DEFAULT,
- };
- ret = mlx5_flow_convert_attributes(attr, error);
- if (ret)
- return ret;
- ret = mlx5_flow_convert_actions(dev, actions, error, parser);
- if (ret)
- return ret;
- ret = mlx5_flow_convert_items_validate(dev, items, error, parser);
- if (ret)
- return ret;
- mlx5_flow_convert_finalise(parser);
- /*
- * Second step.
- * Allocate the memory space to store verbs specifications.
- */
- if (parser->drop) {
- unsigned int offset = parser->queue[HASH_RXQ_ETH].offset;
-
- parser->queue[HASH_RXQ_ETH].ibv_attr =
- mlx5_flow_convert_allocate(offset, error);
- if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
- goto exit_enomem;
- parser->queue[HASH_RXQ_ETH].offset =
- sizeof(struct ibv_flow_attr);
- } else {
- for (i = 0; i != hash_rxq_init_n; ++i) {
- unsigned int offset;
-
- offset = parser->queue[i].offset;
- parser->queue[i].ibv_attr =
- mlx5_flow_convert_allocate(offset, error);
- if (!parser->queue[i].ibv_attr)
- goto exit_enomem;
- parser->queue[i].offset = sizeof(struct ibv_flow_attr);
- }
- }
- /* Third step. Conversion parse, fill the specifications. */
- parser->inner = 0;
- parser->tunnel = 0;
- parser->layer = HASH_RXQ_ETH;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
- struct mlx5_flow_data data = {
- .dev = dev,
- .parser = parser,
- .error = error,
- };
+ assert(nic_mask);
+ for (i = 0; i < size; ++i)
+ if ((nic_mask[i] | mask[i]) != nic_mask[i])
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "mask enables non supported"
+ " bits");
+ if (!item->spec && (item->mask || item->last))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "mask/last without a spec is not"
+ " supported");
+ if (item->spec && item->last) {
+ uint8_t spec[size];
+ uint8_t last[size];
+ unsigned int i;
+ int ret;
- if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
- continue;
- cur_item = &mlx5_flow_items[items->type];
- ret = cur_item->convert(items,
- (cur_item->default_mask ?
- cur_item->default_mask :
- cur_item->mask),
- &data);
- if (ret)
- goto exit_free;
- }
- if (!parser->drop) {
- /* RSS check, remove unused hash types. */
- ret = mlx5_flow_convert_rss(parser);
- if (ret)
- goto exit_free;
- /* Complete missing specification. */
- mlx5_flow_convert_finalise(parser);
- }
- mlx5_flow_update_priority(dev, parser, attr);
- if (parser->mark)
- mlx5_flow_create_flag_mark(parser, parser->mark_id);
- if (parser->count && parser->create) {
- mlx5_flow_create_count(dev, parser);
- if (!parser->cs)
- goto exit_count_error;
- }
-exit_free:
- /* Only verification is expected, all resources should be released. */
- if (!parser->create) {
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (parser->queue[i].ibv_attr) {
- rte_free(parser->queue[i].ibv_attr);
- parser->queue[i].ibv_attr = NULL;
- }
- }
- }
- return ret;
-exit_enomem:
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (parser->queue[i].ibv_attr) {
- rte_free(parser->queue[i].ibv_attr);
- parser->queue[i].ibv_attr = NULL;
+ for (i = 0; i < size; ++i) {
+ spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
+ last[i] = ((const uint8_t *)item->last)[i] & mask[i];
}
+ ret = memcmp(spec, last, size);
+ if (ret != 0)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "range is not supported");
}
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot allocate verbs spec attributes");
- return -rte_errno;
-exit_count_error:
- rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create counter");
- return -rte_errno;
+ return 0;
}
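The byte-wise superset test above is the heart of the check: a mask is acceptable only when every bit it enables is also enabled in the NIC capability mask. A minimal standalone sketch of the same test (the 12-bit capability below is illustrative, not a real NIC mask):

    #include <stdint.h>
    #include <stdio.h>

    /* Return 0 when every bit set in mask is also set in nic_mask. */
    static int
    mask_is_supported(const uint8_t *mask, const uint8_t *nic_mask,
                      unsigned int size)
    {
        unsigned int i;

        for (i = 0; i < size; ++i)
            if ((nic_mask[i] | mask[i]) != nic_mask[i])
                return -1; /* mask enables unsupported bits */
        return 0;
    }

    int
    main(void)
    {
        const uint8_t nic_mask[2] = { 0xff, 0x0f }; /* NIC matches 12 bits */
        const uint8_t ok[2] = { 0xf0, 0x0f };
        const uint8_t bad[2] = { 0xff, 0xf0 }; /* high nibble unsupported */

        printf("ok: %d, bad: %d\n",
               mask_is_supported(ok, nic_mask, 2),
               mask_is_supported(bad, nic_mask, 2));
        return 0;
    }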
/**
- * Copy the specification created into the flow.
+ * Add a Verbs item specification into @p flow.
*
- * @param parser
- * Internal parser structure.
- * @param src
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] src
* Create specification.
- * @param size
+ * @param[in] size
* Size in bytes of the specification to copy.
*/
static void
-mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
- unsigned int size)
+mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
{
- unsigned int i;
- void *dst;
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!parser->queue[i].ibv_attr)
- continue;
- dst = (void *)((uintptr_t)parser->queue[i].ibv_attr +
- parser->queue[i].offset);
+ if (verbs->specs) {
+ void *dst;
+
+ dst = (void *)(verbs->specs + verbs->size);
memcpy(dst, src, size);
- ++parser->queue[i].ibv_attr->num_of_specs;
- parser->queue[i].offset += size;
+ ++verbs->attr->num_of_specs;
}
+ verbs->size += size;
+}
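mlx5_flow_spec_verbs_add() is what makes the measure-then-fill convention of the surrounding converters work: the spec is copied only when a buffer is present, while verbs->size always grows, so a single pass both validates and reports the memory needed. A minimal standalone sketch of that calling convention, with a hypothetical convert_item() standing in for the real converters:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical converter following the same contract as the mlx5 item
     * converters: always return the bytes needed, write only if they fit. */
    static int
    convert_item(const char *payload, char *buf, size_t buf_size)
    {
        size_t size = strlen(payload) + 1;

        if (size <= buf_size)
            memcpy(buf, payload, size);
        return (int)size;
    }

    int
    main(void)
    {
        /* First pass: measure with no buffer at all. */
        int needed = convert_item("example-spec", NULL, 0);
        /* Second pass: allocate exactly what the first pass reported. */
        char *buf = malloc(needed);

        if (!buf)
            return 1;
        convert_item("example-spec", buf, needed);
        printf("wrote %d bytes: %s\n", needed, buf);
        free(buf);
        return 0;
    }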
+
+/**
+ * Adjust Verbs hash fields according to the @p flow information.
+ *
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] tunnel
+ * 1 when the hash field is for a tunnel item.
+ * @param[in] layer_types
+ * ETH_RSS_* types.
+ * @param[in] hash_fields
+ * Item hash fields.
+ */
+static void
+mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,
+ int tunnel __rte_unused,
+ uint32_t layer_types, uint64_t hash_fields)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
+ if (flow->rss.level == 2 && !tunnel)
+ hash_fields = 0;
+ else if (flow->rss.level < 2 && tunnel)
+ hash_fields = 0;
+#endif
+ if (!(flow->rss.types & layer_types))
+ hash_fields = 0;
+ flow->cur_verbs->hash_fields |= hash_fields;
}
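The adjust rule is easiest to see on an example: hash fields contributed by an item survive only if the item's layer participates in the RSS types the application requested, and (with tunnel support) only if the inner/outer RSS level matches the item. A standalone sketch of the type-intersection part, using illustrative placeholder constants rather than the real ETH_RSS_*/IBV_RX_HASH_* values:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative placeholders, not the real rdma-core/DPDK constants. */
    #define RSS_IPV4      (1u << 0)
    #define RSS_UDP       (1u << 1)
    #define HASH_SRC_IPV4 (1u << 0)
    #define HASH_DST_IPV4 (1u << 1)

    static uint64_t
    hashfields_adjust(uint32_t requested_types, uint32_t layer_types,
                      uint64_t hash_fields)
    {
        /* Same rule as mlx5_flow_verbs_hashfields_adjust(): drop the fields
         * when the layer does not participate in the requested RSS types. */
        if (!(requested_types & layer_types))
            return 0;
        return hash_fields;
    }

    int
    main(void)
    {
        /* The flow requests UDP RSS only: an IPv4 item contributes no
         * hash fields, so only the L4 item will select the queues. */
        uint64_t f = hashfields_adjust(RSS_UDP, RSS_IPV4,
                                       HASH_SRC_IPV4 | HASH_DST_IPV4);

        printf("ipv4 hash fields: 0x%llx\n", (unsigned long long)f);
        return 0;
    }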
/**
- * Convert Ethernet item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with the returned memory size should be
+ * done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_eth(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data)
+mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_eth *spec = item->spec;
const struct rte_flow_item_eth *mask = item->mask;
- struct mlx5_flow_parse *parser = data->parser;
- const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
+ const struct rte_flow_item_eth nic_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = RTE_BE16(0xffff),
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const unsigned int size = sizeof(struct ibv_flow_spec_eth);
struct ibv_flow_spec_eth eth = {
- .type = parser->inner | IBV_FLOW_SPEC_ETH,
- .size = eth_size,
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
};
+ int ret;
- parser->layer = HASH_RXQ_ETH;
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L2 layers already configured");
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_eth),
+ error);
+ if (ret)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ if (size > flow_size)
+ return size;
if (spec) {
unsigned int i;
- if (!mask)
- mask = default_mask;
memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
eth.val.ether_type = spec->type;
@@ -1459,112 +824,211 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
}
eth.val.ether_type &= eth.mask.ether_type;
}
- mlx5_flow_create_copy(parser, &eth, eth_size);
- return 0;
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ mlx5_flow_spec_verbs_add(flow, &eth, size);
+ return size;
+}
+
+/**
+ * Update the VLAN tag in the Verbs Ethernet specification.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] eth
+ * Verbs structure containing the VLAN information to copy.
+ */
+static void
+mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
+ struct ibv_flow_spec_eth *eth)
+{
+ unsigned int i;
+ const enum ibv_flow_spec_type search = eth->type;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ struct ibv_flow_spec_eth *e =
+ (struct ibv_flow_spec_eth *)hdr;
+
+ e->val.vlan_tag = eth->val.vlan_tag;
+ e->mask.vlan_tag = eth->mask.vlan_tag;
+ e->val.ether_type = eth->val.ether_type;
+ e->mask.ether_type = eth->mask.ether_type;
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
}
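This update, like mlx5_flow_verbs_mark_update() and mlx5_flow_item_gre_ip_protocol_update() further below, walks the Verbs attribute's spec area as a packed list of variable-size entries: each entry begins with a header whose size field is the offset to the next entry. A standalone sketch of the walk with a minimal stand-in header (the real layout comes from struct ibv_spec_header):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Minimal stand-in for struct ibv_spec_header. */
    struct spec_header {
        uint32_t type;
        uint16_t size; /* total size of this spec, header included */
    };

    /* Visit num_of_specs packed specs, hopping by each header's size. */
    static void
    walk_specs(const uint8_t *specs, unsigned int num_of_specs)
    {
        const struct spec_header *hdr = (const struct spec_header *)specs;
        unsigned int i;

        for (i = 0; i != num_of_specs; ++i) {
            printf("spec %u: type %u, size %u\n", i, hdr->type, hdr->size);
            hdr = (const struct spec_header *)
                  ((const uint8_t *)hdr + hdr->size);
        }
    }

    int
    main(void)
    {
        uint32_t buf[16] = { 0 };
        struct spec_header eth = { .type = 2, .size = 16 };
        struct spec_header ipv4 = { .type = 4, .size = 24 };

        memcpy(buf, &eth, sizeof(eth));
        memcpy((uint8_t *)buf + eth.size, &ipv4, sizeof(ipv4));
        walk_specs((const uint8_t *)buf, 2);
        return 0;
    }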
/**
- * Convert VLAN item to Verbs specification.
+ * Convert the @p item into a Verbs specification, or update the Ethernet
+ * Verbs specification already present in @p flow, after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with the returned memory size should be
+ * done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_vlan(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data)
+mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
- struct mlx5_flow_parse *parser = data->parser;
- struct ibv_flow_spec_eth *eth;
- const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
- const char *msg = "VLAN cannot be empty";
-
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(0x0fff),
+ .inner_type = RTE_BE16(0xffff),
+ };
+ unsigned int size = sizeof(struct ibv_flow_spec_eth);
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct ibv_flow_spec_eth eth = {
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+ const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+ MLX5_FLOW_LAYER_INNER_L4) :
+ (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
+ const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+ const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+
+ if (flow->layers & vlanm)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN layer already configured");
+ else if ((flow->layers & l34m) != 0)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L2 layer cannot follow L3/L4 layer");
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_vlan), error);
+ if (ret)
+ return ret;
if (spec) {
- unsigned int i;
- if (!mask)
- mask = default_mask;
-
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!parser->queue[i].ibv_attr)
- continue;
-
- eth = (void *)((uintptr_t)parser->queue[i].ibv_attr +
- parser->queue[i].offset - eth_size);
- eth->val.vlan_tag = spec->tci;
- eth->mask.vlan_tag = mask->tci;
- eth->val.vlan_tag &= eth->mask.vlan_tag;
- /*
- * From verbs perspective an empty VLAN is equivalent
- * to a packet without VLAN layer.
- */
- if (!eth->mask.vlan_tag)
- goto error;
- /* Outer TPID cannot be matched. */
- if (eth->mask.ether_type) {
- msg = "VLAN TPID matching is not supported";
- goto error;
- }
- eth->val.ether_type = spec->inner_type;
- eth->mask.ether_type = mask->inner_type;
- eth->val.ether_type &= eth->mask.ether_type;
- }
- return 0;
+ eth.val.vlan_tag = spec->tci;
+ eth.mask.vlan_tag = mask->tci;
+ eth.val.vlan_tag &= eth.mask.vlan_tag;
+ eth.val.ether_type = spec->inner_type;
+ eth.mask.ether_type = mask->inner_type;
+ eth.val.ether_type &= eth.mask.ether_type;
}
-error:
- return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
- item, msg);
+ /*
+ * From verbs perspective an empty VLAN is equivalent
+ * to a packet without VLAN layer.
+ */
+ if (!eth.mask.vlan_tag)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ item->spec,
+ "VLAN cannot be empty");
+ if (!(flow->layers & l2m)) {
+ if (size <= flow_size) {
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ mlx5_flow_spec_verbs_add(flow, &eth, size);
+ }
+ } else {
+ if (flow->cur_verbs)
+ mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
+ &eth);
+ size = 0; /* Only the existing eth specification is updated. */
+ }
+ flow->layers |= tunnel ?
+ (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
+ return size;
}
/**
- * Convert IPv4 item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with the returned memory size should be
+ * done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_ipv4(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data)
+mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
- struct priv *priv = data->dev->data->dev_private;
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
- struct mlx5_flow_parse *parser = data->parser;
- unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);
+ const struct rte_flow_item_ipv4 nic_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ },
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
struct ibv_flow_spec_ipv4_ext ipv4 = {
- .type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT,
- .size = ipv4_size,
+ .type = IBV_FLOW_SPEC_IPV4_EXT |
+ (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
};
+ int ret;
- if (parser->layer == HASH_RXQ_TUNNEL &&
- parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] &&
- !priv->config.l3_vxlan_en)
- return rte_flow_error_set(data->error, EINVAL,
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple L3 layers not supported");
+ else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "L3 VXLAN not enabled by device"
- " parameter and/or not configured"
- " in firmware");
- parser->layer = HASH_RXQ_IPV4;
+ "L3 cannot follow an L4 layer.");
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv4), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (spec) {
- if (!mask)
- mask = default_mask;
ipv4.val = (struct ibv_flow_ipv4_ext_filter){
.src_ip = spec->hdr.src_addr,
.dst_ip = spec->hdr.dst_addr,
@@ -1583,55 +1047,108 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
ipv4.val.proto &= ipv4.mask.proto;
ipv4.val.tos &= ipv4.mask.tos;
}
- mlx5_flow_create_copy(parser, &ipv4, ipv4_size);
- return 0;
+ flow->l3_protocol_en = !!ipv4.mask.proto;
+ flow->l3_protocol = ipv4.val.proto;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust
+ (flow, tunnel,
+ (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER),
+ (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
+ mlx5_flow_spec_verbs_add(flow, &ipv4, size);
+ }
+ return size;
}
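The l3_protocol bookkeeping at the end is consumed by the L4 converters below: once an IPv4 (or IPv6) item pins the IP protocol, a later UDP or TCP item must agree with it. A standalone sketch of that gating with local stand-ins for the flow state and protocol numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define IP_PROTO_TCP 6
    #define IP_PROTO_UDP 17

    struct flow_state {
        int l3_protocol_en;  /* protocol pinned by the L3 item? */
        uint8_t l3_protocol;
    };

    /* Same gating as mlx5_flow_item_udp(): reject a UDP item when the L3
     * item already matched another IP protocol. */
    static int
    check_udp_item(const struct flow_state *flow)
    {
        if (flow->l3_protocol_en && flow->l3_protocol != IP_PROTO_UDP)
            return -1;
        return 0;
    }

    int
    main(void)
    {
        /* e.g. an ipv4 spec/mask carrying next_proto_id = 6 (TCP). */
        struct flow_state flow = { .l3_protocol_en = 1,
                                   .l3_protocol = IP_PROTO_TCP };

        printf("udp item accepted: %s\n",
               check_udp_item(&flow) ? "no" : "yes");
        return 0;
    }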
/**
- * Convert IPv6 item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with the returned memory size should be
+ * done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_ipv6(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data)
+mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
- struct priv *priv = data->dev->data->dev_private;
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
- struct mlx5_flow_parse *parser = data->parser;
- unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
+ const struct rte_flow_item_ipv6 nic_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
struct ibv_flow_spec_ipv6 ipv6 = {
- .type = parser->inner | IBV_FLOW_SPEC_IPV6,
- .size = ipv6_size,
+ .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
};
+ int ret;
- if (parser->layer == HASH_RXQ_TUNNEL &&
- parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] &&
- !priv->config.l3_vxlan_en)
- return rte_flow_error_set(data->error, EINVAL,
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple L3 layers not supported");
+ else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 cannot follow an L4 layer.");
+ /*
+ * IPv6 is not recognised by the NIC inside a GRE tunnel.
+ * Such support has to be disabled here, as Verbs would otherwise
+ * accept the rule even though the NIC cannot process it. Issue
+ * reproduced with Mellanox OFED 4.3-3.0.2.1 and Mellanox OFED
+ * 4.4-1.0.0.0.
+ */
+ if (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "L3 VXLAN not enabled by device"
- " parameter and/or not configured"
- " in firmware");
- parser->layer = HASH_RXQ_IPV6;
+ "IPv6 inside a GRE tunnel is"
+ " not recognised.");
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv6), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (spec) {
unsigned int i;
uint32_t vtc_flow_val;
uint32_t vtc_flow_mask;
- if (!mask)
- mask = default_mask;
memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
RTE_DIM(ipv6.val.src_ip));
memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
@@ -1666,44 +1183,86 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
ipv6.val.next_hdr &= ipv6.mask.next_hdr;
ipv6.val.hop_limit &= ipv6.mask.hop_limit;
}
- mlx5_flow_create_copy(parser, &ipv6, ipv6_size);
- return 0;
+ flow->l3_protocol_en = !!ipv6.mask.next_hdr;
+ flow->l3_protocol = ipv6.val.next_hdr;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust
+ (flow, tunnel,
+ (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),
+ (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
+ mlx5_flow_spec_verbs_add(flow, &ipv6, size);
+ }
+ return size;
}
/**
- * Convert UDP item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with the returned memory size should be
+ * done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_udp(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data)
+mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
- struct mlx5_flow_parse *parser = data->parser;
- unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
struct ibv_flow_spec_tcp_udp udp = {
- .type = parser->inner | IBV_FLOW_SPEC_UDP,
- .size = udp_size,
+ .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
};
+ int ret;
- if (parser->layer == HASH_RXQ_IPV4)
- parser->layer = HASH_RXQ_UDPV4;
- else
- parser->layer = HASH_RXQ_UDPV6;
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with UDP layer");
+ if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 is mandatory to filter"
+ " on L4");
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L4 layer is already"
+ " present");
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
if (spec) {
- if (!mask)
- mask = default_mask;
udp.val.dst_port = spec->hdr.dst_port;
udp.val.src_port = spec->hdr.src_port;
udp.mask.dst_port = mask->hdr.dst_port;
@@ -1712,44 +1271,81 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
udp.val.src_port &= udp.mask.src_port;
udp.val.dst_port &= udp.mask.dst_port;
}
- mlx5_flow_create_copy(parser, &udp, udp_size);
- return 0;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
+ (IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
+ mlx5_flow_spec_verbs_add(flow, &udp, size);
+ }
+ return size;
}
/**
- * Convert TCP item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with the returned memory size should be
+ * done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_tcp(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data)
+mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
- struct mlx5_flow_parse *parser = data->parser;
- unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
struct ibv_flow_spec_tcp_udp tcp = {
- .type = parser->inner | IBV_FLOW_SPEC_TCP,
- .size = tcp_size,
+ .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
};
+ int ret;
- if (parser->layer == HASH_RXQ_IPV4)
- parser->layer = HASH_RXQ_TCPV4;
- else
- parser->layer = HASH_RXQ_TCPV6;
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with TCP layer");
+ if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 is mandatory to filter on L4");
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L4 layer is already present");
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_tcp_mask,
+ sizeof(struct rte_flow_item_tcp), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
if (spec) {
- if (!mask)
- mask = default_mask;
tcp.val.dst_port = spec->hdr.dst_port;
tcp.val.src_port = spec->hdr.src_port;
tcp.mask.dst_port = mask->hdr.dst_port;
@@ -1758,49 +1354,78 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
tcp.val.src_port &= tcp.mask.src_port;
tcp.val.dst_port &= tcp.mask.dst_port;
}
- mlx5_flow_create_copy(parser, &tcp, tcp_size);
- return 0;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
+ (IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
+ mlx5_flow_spec_verbs_add(flow, &tcp, size);
+ }
+ return size;
}
/**
- * Convert VXLAN item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with the returned memory size should be
+ * done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_vxlan(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data)
+mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
- struct mlx5_flow_parse *parser = data->parser;
unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
struct ibv_flow_spec_tunnel vxlan = {
- .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
.size = size,
};
+ int ret;
union vni {
uint32_t vlan_id;
uint8_t vni[4];
- } id;
+ } id = { .vlan_id = 0, };
- id.vni[0] = 0;
- parser->inner = IBV_FLOW_SPEC_INNER;
- parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)];
- parser->out_layer = parser->layer;
- parser->layer = HASH_RXQ_TUNNEL;
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ /*
+ * Verify only UDPv4 is present, as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan), error);
+ if (ret < 0)
+ return ret;
if (spec) {
- if (!mask)
- mask = default_mask;
memcpy(&id.vni[1], spec->vni, 3);
vxlan.val.tunnel_id = id.vlan_id;
memcpy(&id.vni[1], mask->vni, 3);
@@ -1809,148 +1434,272 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
}
/*
- * Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this
- * layer is defined in the Verbs specification it is interpreted as
- * wildcard and all packets will match this rule, if it follows a full
- * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
- * before will also match this rule.
- * To avoid such situation, VNI 0 is currently refused.
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer: if
+ * only this layer is defined in the Verbs specification, it is
+ * interpreted as a wildcard and all packets will match this
+ * rule; if it follows a full stack layer (e.g. eth / ipv4 /
+ * udp), all packets matching the preceding layers will also
+ * match this rule. To avoid such a situation, VNI 0 is
+ * currently refused.
*/
- /* Only allow tunnel w/o tunnel id pattern after proper outer spec. */
- if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id)
- return rte_flow_error_set(data->error, EINVAL,
+ if (!vxlan.val.tunnel_id)
+ return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "VxLAN vni cannot be 0");
- mlx5_flow_create_copy(parser, &vxlan, size);
- return 0;
+ "VXLAN vni cannot be 0");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN tunnel must be fully defined");
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &vxlan, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_VXLAN;
+ return size;
}
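The union trick above deserves a note: the VNI is a 24-bit field carried in network byte order, so copying its three bytes into bytes 1..3 of a zeroed 32-bit word yields the tunnel_id value without any shifting. A standalone sketch (the VNI value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        /* 24-bit VNI as carried on the wire (network byte order). */
        const uint8_t vni[3] = { 0x12, 0x34, 0x56 };
        union {
            uint32_t tunnel_id;
            uint8_t bytes[4];
        } id = { .tunnel_id = 0 };

        /* Same trick as mlx5_flow_item_vxlan(): byte 0 stays zero, the
         * VNI lands in bytes 1..3, regardless of host endianness. */
        memcpy(&id.bytes[1], vni, 3);
        printf("tunnel_id bytes: %02x %02x %02x %02x\n",
               id.bytes[0], id.bytes[1], id.bytes[2], id.bytes[3]);
        return 0;
    }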
/**
- * Convert VXLAN-GPE item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
*
- * @param item[in]
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with the returned memory size should be
+ * done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data)
+mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
{
- struct priv *priv = data->dev->data->dev_private;
const struct rte_flow_item_vxlan_gpe *spec = item->spec;
const struct rte_flow_item_vxlan_gpe *mask = item->mask;
- struct mlx5_flow_parse *parser = data->parser;
unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel vxlan = {
- .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ struct ibv_flow_spec_tunnel vxlan_gpe = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
.size = size,
};
+ int ret;
union vni {
uint32_t vlan_id;
uint8_t vni[4];
- } id;
+ } id = { .vlan_id = 0, };
- if (!priv->config.l3_vxlan_en)
- return rte_flow_error_set(data->error, EINVAL,
+ if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)
+ return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "L3 VXLAN not enabled by device"
- " parameter and/or not configured"
- " in firmware");
- id.vni[0] = 0;
- parser->inner = IBV_FLOW_SPEC_INNER;
- parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)];
- parser->out_layer = parser->layer;
- parser->layer = HASH_RXQ_TUNNEL;
+ "L3 VXLAN is not enabled by device"
+ " parameter and/or not configured in"
+ " firmware");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ /*
+ * Verify only UDPv4 is present, as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_gpe_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
+ sizeof(struct rte_flow_item_vxlan_gpe), error);
+ if (ret < 0)
+ return ret;
if (spec) {
- if (!mask)
- mask = default_mask;
memcpy(&id.vni[1], spec->vni, 3);
- vxlan.val.tunnel_id = id.vlan_id;
+ vxlan_gpe.val.tunnel_id = id.vlan_id;
memcpy(&id.vni[1], mask->vni, 3);
- vxlan.mask.tunnel_id = id.vlan_id;
+ vxlan_gpe.mask.tunnel_id = id.vlan_id;
if (spec->protocol)
- return rte_flow_error_set(data->error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VxLAN-GPE protocol not"
- " supported");
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VxLAN-GPE protocol not supported");
/* Remove unwanted bits from values. */
- vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+ vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
}
/*
* Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this
* layer is defined in the Verbs specification it is interpreted as
* wildcard and all packets will match this rule, if it follows a full
* stack layer (ex: eth / ipv4 / udp), all packets matching the layers
- * before will also match this rule.
- * To avoid such situation, VNI 0 is currently refused.
+ * before will also match this rule. To avoid such a situation, VNI 0
+ * is currently refused.
*/
- /* Only allow tunnel w/o tunnel id pattern after proper outer spec. */
- if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id)
- return rte_flow_error_set(data->error, EINVAL,
+ if (!vxlan_gpe.val.tunnel_id)
+ return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "VxLAN-GPE vni cannot be 0");
- mlx5_flow_create_copy(parser, &vxlan, size);
- return 0;
+ "VXLAN-GPE vni cannot be 0");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN-GPE tunnel must be fully"
+ " defined");
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ return size;
+}
+
+/**
+ * Update the protocol in Verbs IPv4/IPv6 spec.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] search
+ * Specification type to search in order to update the IP protocol.
+ * @param[in] protocol
+ * Protocol value to set if none is present in the specification.
+ */
+static void
+mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
+ enum ibv_flow_spec_type search,
+ uint8_t protocol)
+{
+ unsigned int i;
+ struct ibv_spec_header *hdr;
+
+ if (!attr)
+ return;
+ /* Compute the first spec only once attr is known to be valid. */
+ hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ union {
+ struct ibv_flow_spec_ipv4_ext *ipv4;
+ struct ibv_flow_spec_ipv6 *ipv6;
+ } ip;
+
+ switch (search) {
+ case IBV_FLOW_SPEC_IPV4_EXT:
+ ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
+ if (!ip.ipv4->val.proto) {
+ ip.ipv4->val.proto = protocol;
+ ip.ipv4->mask.proto = 0xff;
+ }
+ break;
+ case IBV_FLOW_SPEC_IPV6:
+ ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
+ if (!ip.ipv6->val.next_hdr) {
+ ip.ipv6->val.next_hdr = protocol;
+ ip.ipv6->mask.next_hdr = 0xff;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
}
/**
- * Convert GRE item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * It will also update the previous L3 layer with the GRE IP protocol value
+ * (47) when the application did not specify one.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with the returned memory size should be
+ * done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_gre(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data)
+mlx5_flow_item_gre(const struct rte_flow_item *item,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
{
- struct mlx5_flow_parse *parser = data->parser;
-#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
- (void)default_mask;
- unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
- struct ibv_flow_spec_tunnel tunnel = {
- .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
-#else
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
const struct rte_flow_item_gre *spec = item->spec;
const struct rte_flow_item_gre *mask = item->mask;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
unsigned int size = sizeof(struct ibv_flow_spec_gre);
struct ibv_flow_spec_gre tunnel = {
- .type = parser->inner | IBV_FLOW_SPEC_GRE,
+ .type = IBV_FLOW_SPEC_GRE,
+ .size = size,
+ };
+#else
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel tunnel = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
.size = size,
};
#endif
- struct ibv_flow_spec_ipv4_ext *ipv4;
- struct ibv_flow_spec_ipv6 *ipv6;
- unsigned int i;
+ int ret;
- parser->inner = IBV_FLOW_SPEC_INNER;
- parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)];
- parser->out_layer = parser->layer;
- parser->layer = HASH_RXQ_TUNNEL;
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with this GRE layer");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 Layer is missing");
+ if (!mask)
+ mask = &rte_flow_item_gre_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_gre_mask,
+ sizeof(struct rte_flow_item_gre), error);
+ if (ret < 0)
+ return ret;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
if (spec) {
- if (!mask)
- mask = default_mask;
tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
tunnel.val.protocol = spec->protocol;
tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
@@ -1960,480 +1709,1288 @@ mlx5_flow_create_gre(const struct rte_flow_item *item,
tunnel.val.protocol &= tunnel.mask.protocol;
tunnel.val.key &= tunnel.mask.key;
}
-#endif
- /* Update encapsulation IP layer protocol. */
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!parser->queue[i].ibv_attr)
- continue;
- if (parser->out_layer == HASH_RXQ_IPV4) {
- ipv4 = (void *)((uintptr_t)parser->queue[i].ibv_attr +
- parser->queue[i].offset -
- sizeof(struct ibv_flow_spec_ipv4_ext));
- if (ipv4->mask.proto && ipv4->val.proto != MLX5_GRE)
- break;
- ipv4->val.proto = MLX5_GRE;
- ipv4->mask.proto = 0xff;
- } else if (parser->out_layer == HASH_RXQ_IPV6) {
- ipv6 = (void *)((uintptr_t)parser->queue[i].ibv_attr +
- parser->queue[i].offset -
- sizeof(struct ibv_flow_spec_ipv6));
- if (ipv6->mask.next_hdr &&
- ipv6->val.next_hdr != MLX5_GRE)
- break;
- ipv6->val.next_hdr = MLX5_GRE;
- ipv6->mask.next_hdr = 0xff;
- }
- }
- if (i != hash_rxq_init_n)
- return rte_flow_error_set(data->error, EINVAL,
+#else
+ if (spec && (spec->protocol & mask->protocol))
+ return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "IP protocol of GRE must be 47");
- mlx5_flow_create_copy(parser, &tunnel, size);
- return 0;
+ "without MPLS support the"
+ " specification cannot be used for"
+ " filtering");
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ if (size <= flow_size) {
+ if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ mlx5_flow_item_gre_ip_protocol_update
+ (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT,
+ MLX5_IP_PROTOCOL_GRE);
+ else
+ mlx5_flow_item_gre_ip_protocol_update
+ (verbs->attr, IBV_FLOW_SPEC_IPV6,
+ MLX5_IP_PROTOCOL_GRE);
+ mlx5_flow_spec_verbs_add(flow, &tunnel, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_GRE;
+ return size;
}
/**
- * Convert MPLS item to Verbs specification.
- * MPLS tunnel types currently supported are MPLS-in-GRE and MPLS-in-UDP.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with the returned memory size should be
+ * done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_mpls(const struct rte_flow_item *item,
- const void *default_mask,
- struct mlx5_flow_data *data)
+mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const size_t flow_size __rte_unused,
+ struct rte_flow_error *error)
{
-#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
- (void)default_mask;
- return rte_flow_error_set(data->error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "MPLS is not supported by driver");
-#else
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
const struct rte_flow_item_mpls *spec = item->spec;
const struct rte_flow_item_mpls *mask = item->mask;
- struct mlx5_flow_parse *parser = data->parser;
unsigned int size = sizeof(struct ibv_flow_spec_mpls);
struct ibv_flow_spec_mpls mpls = {
.type = IBV_FLOW_SPEC_MPLS,
.size = size,
};
+ int ret;
- parser->inner = IBV_FLOW_SPEC_INNER;
- if (parser->layer == HASH_RXQ_UDPV4 ||
- parser->layer == HASH_RXQ_UDPV6) {
- parser->tunnel =
- ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)];
- parser->out_layer = parser->layer;
- } else {
- parser->tunnel =
- ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)];
- /* parser->out_layer stays as in GRE out_layer. */
- }
- parser->layer = HASH_RXQ_TUNNEL;
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with MPLS layer");
+ /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL &&
+ (flow->layers & MLX5_FLOW_LAYER_GRE) != MLX5_FLOW_LAYER_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already"
+ " present");
+ if (!mask)
+ mask = &rte_flow_item_mpls_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_mpls_mask,
+ sizeof(struct rte_flow_item_mpls), error);
+ if (ret < 0)
+ return ret;
if (spec) {
- if (!mask)
- mask = default_mask;
- /*
- * The verbs label field includes the entire MPLS header:
- * bits 0:19 - label value field.
- * bits 20:22 - traffic class field.
- * bits 23 - bottom of stack bit.
- * bits 24:31 - ttl field.
- */
- mpls.val.label = *(const uint32_t *)spec;
- mpls.mask.label = *(const uint32_t *)mask;
- /* Remove unwanted bits from values. */
+ memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
+ memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
+ /* Remove unwanted bits from values. */
mpls.val.label &= mpls.mask.label;
}
- mlx5_flow_create_copy(parser, &mpls, size);
- return 0;
-#endif
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &mpls, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_MPLS;
+ return size;
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "MPLS is not supported by Verbs, please"
+ " update.");
}
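The memcpy() into mpls.val.label works because an MPLS label stack entry is a single 32-bit word; per RFC 3032 it packs the label (20 bits), traffic class (3), bottom-of-stack flag (1) and TTL (8), the same layout the removed comment spelled out. A standalone pack/unpack sketch of that word (host-order arithmetic, values arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* RFC 3032 label stack entry: label(20) | TC(3) | S(1) | TTL(8). */
    static uint32_t
    mpls_pack(uint32_t label, uint8_t tc, uint8_t bos, uint8_t ttl)
    {
        return (label & 0xfffff) << 12 | (uint32_t)(tc & 0x7) << 9 |
               (uint32_t)(bos & 0x1) << 8 | ttl;
    }

    int
    main(void)
    {
        uint32_t entry = mpls_pack(16, 0, 1, 64);

        printf("entry: 0x%08x, label: %u, ttl: %u\n",
               entry, entry >> 12, entry & 0xff);
        return 0;
    }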
/**
- * Convert mark/flag action to Verbs specification.
+ * Convert the @p pattern into Verbs specifications after ensuring the NIC
+ * will understand and process it correctly.
+ * The conversion is performed item by item; each of them is written into
+ * the @p flow if its size is less than or equal to @p flow_size.
+ * Validation and memory consumption computation are still performed until the
+ * end of @p pattern, unless an error is encountered.
*
- * @param parser
- * Internal parser structure.
- * @param mark_id
- * Mark identifier.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] pattern
+ * Flow pattern.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, some
+ * garbage may be present.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p pattern has been fully
+ * converted; otherwise another call with the returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_items(struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ int remain = flow_size;
+ size_t size = 0;
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ int ret = 0;
+
+ switch (pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_item_eth(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_item_udp(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_item_tcp(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_item_vxlan(pattern, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,
+ remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ ret = mlx5_flow_item_gre(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_item_mpls(pattern, flow, remain, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "item not supported");
+ }
+ if (ret < 0)
+ return ret;
+ if (remain > ret)
+ remain -= ret;
+ else
+ remain = 0;
+ size += ret;
+ }
+ if (!flow->layers) {
+ const struct rte_flow_item item = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ };
+
+ return mlx5_flow_item_eth(&item, flow, flow_size, error);
+ }
+ return size;
+}
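The remain bookkeeping is what lets a single pass serve both validation and sizing: once the caller's space is exhausted, remain saturates at zero so later converters stop writing, yet size keeps accumulating and the final return value tells the caller how much memory a second pass needs. A standalone sketch of that accounting with made-up item sizes:

    #include <stdio.h>

    int
    main(void)
    {
        /* Sizes a hypothetical eth/ipv4/udp pattern would need. */
        const int item_sizes[] = { 16, 40, 16 };
        int remain = 48; /* space the caller provided */
        int total = 0;
        unsigned int i;

        for (i = 0; i < 3; ++i) {
            int ret = item_sizes[i]; /* converter returns needed bytes */

            /* Same saturating update as mlx5_flow_items(). */
            if (remain > ret)
                remain -= ret;
            else
                remain = 0;
            total += ret;
        }
        /* total (72) > provided (48): caller must retry with 72 bytes. */
        printf("needed %d bytes\n", total);
        return 0;
    }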
+
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p action has been fully
+ * converted; otherwise another call with the returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_drop(const struct rte_flow_action *action,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+ struct ibv_flow_spec_action_drop drop = {
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ .size = size,
+ };
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "drop is not compatible with"
+ " flag/mark action");
+ if (size <= flow_size)
+ mlx5_flow_spec_verbs_add(flow, &drop, size);
+ flow->fate |= MLX5_FLOW_FATE_DROP;
+ return size;
+}
+
+/**
+ * Convert the @p action into @p flow after ensuring the NIC will understand
+ * and process it correctly.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
+mlx5_flow_action_queue(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_queue *queue = action->conf;
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (queue->index >= priv->rxqs_n)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue index out of range");
+ if (!(*priv->rxqs)[queue->index])
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue is not configured");
+ if (flow->queue)
+ (*flow->queue)[0] = queue->index;
+ flow->rss.queue_num = 1;
+ flow->fate |= MLX5_FLOW_FATE_QUEUE;
+ return 0;
+}
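For context, this is the validation backing the public rte_flow queue action. A hedged application-side sketch (queue index 3 and the eth-only pattern are illustrative; the port must already be started with enough Rx queues):

    #include <rte_ethdev.h>
    #include <rte_flow.h>

    /* Sketch: steer all matching traffic on @p port_id to Rx queue 3. */
    static struct rte_flow *
    steer_to_queue(uint16_t port_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        if (rte_flow_validate(port_id, &attr, pattern, actions, &error))
            return NULL;
        return rte_flow_create(port_id, &attr, pattern, actions, &error);
    }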
+
+/**
+ * Ensure the @p action will be understood and used correctly by the NIC.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Pointer to flow actions array.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the @p flow->queue array and @p flow->rss are filled and valid.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_rss(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_rss *rss = action->conf;
+ unsigned int i;
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->func,
+ "RSS hash function not supported");
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (rss->level > 2)
+#else
+ if (rss->level > 1)
+#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->level,
+ "tunnel RSS is not supported");
+ if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too small");
+ if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too large");
+ if (!rss->queue_num)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ rss,
+ "no queues were provided for RSS");
+ if (rss->queue_num > priv->config.ind_table_max_size)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue_num,
+ "number of queues too large");
+ if (rss->types & MLX5_RSS_HF_MASK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->types,
+ "some RSS protocols are not"
+ " supported");
+ for (i = 0; i != rss->queue_num; ++i) {
+ if (rss->queue[i] >= priv->rxqs_n)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ rss,
+ "queue index out of range");
+ if (!(*priv->rxqs)[rss->queue[i]])
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i],
+ "queue is not configured");
+ }
+ if (flow->queue)
+ memcpy((*flow->queue), rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ flow->rss.queue_num = rss->queue_num;
+ memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
+ flow->rss.types = rss->types;
+ flow->rss.level = rss->level;
+ flow->fate |= MLX5_FLOW_FATE_RSS;
+ return 0;
+}
+
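[Editorial aside — not part of the patch.] A hypothetical configuration that passes every check above, assuming MLX5_RSS_HASH_KEY_LEN is 40 bytes and that queues 0-3 exist and are configured on the port:

    /* Sketch of an RSS action accepted by mlx5_flow_action_rss(). */
    static const uint8_t rss_key[40] = { 0 };	/* application Toeplitz key */
    static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
    static const struct rte_flow_action_rss rss_conf = {
        .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,	/* or DEFAULT */
        .level = 1,	/* level > 1 requires tunnel RSS support */
        .types = ETH_RSS_IP,	/* must not overlap MLX5_RSS_HF_MASK */
        .key_len = sizeof(rss_key),	/* exactly MLX5_RSS_HASH_KEY_LEN */
        .queue_num = RTE_DIM(rss_queues),	/* <= ind_table_max_size */
        .key = rss_key,
        .queue = rss_queues,
    };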
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned
+ * value is less than or equal to @p flow_size, the @p action has been
+ * fully converted; otherwise another call with the returned size should
+ * be made.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_flag(const struct rte_flow_action *action,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
{
unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
struct ibv_flow_spec_action_tag tag = {
.type = IBV_FLOW_SPEC_ACTION_TAG,
.size = size,
- .tag_id = mlx5_flow_mark_set(mark_id),
+ .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
};
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+
+ if (flow->modifier & MLX5_FLOW_MOD_FLAG)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flag action already present");
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flag is not compatible with drop"
+ " action");
+ if (flow->modifier & MLX5_FLOW_MOD_MARK)
+ size = 0;
+ else if (size <= flow_size && verbs)
+ mlx5_flow_spec_verbs_add(flow, &tag, size);
+ flow->modifier |= MLX5_FLOW_MOD_FLAG;
+ return size;
+}
- assert(parser->mark);
- mlx5_flow_create_copy(parser, &tag, size);
- return 0;
+/**
+ * Update verbs specification to modify the flag to mark.
+ *
+ * @param[in, out] verbs
+ * Pointer to the mlx5_flow_verbs structure.
+ * @param[in] mark_id
+ * Mark identifier to replace the flag.
+ */
+static void
+mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
+{
+ struct ibv_spec_header *hdr;
+ int i;
+
+ if (!verbs)
+ return;
+ /* Update Verbs specification. */
+ hdr = (struct ibv_spec_header *)verbs->specs;
+ if (!hdr)
+ return;
+ for (i = 0; i != verbs->attr->num_of_specs; ++i) {
+ if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
+ struct ibv_flow_spec_action_tag *t =
+ (struct ibv_flow_spec_action_tag *)hdr;
+
+ t->tag_id = mlx5_flow_mark_set(mark_id);
+ }
+ hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
+ }
}
/**
- * Convert count action to Verbs specification.
+ * Convert the @p action into @p flow (or update the Flag Verbs
+ * specification already present) after ensuring the NIC will understand
+ * and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param parser
- * Pointer to MLX5 flow parser structure.
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned
+ * value is less than or equal to @p flow_size, the @p action has been
+ * fully converted; otherwise another call with the returned size should
+ * be made.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_mark(const struct rte_flow_action *action,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_mark *mark = action->conf;
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag tag = {
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
+ .size = size,
+ };
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+
+ if (!mark)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "configuration cannot be null");
+ if (mark->id >= MLX5_FLOW_MARK_MAX)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &mark->id,
+ "mark id must in 0 <= id < "
+ RTE_STR(MLX5_FLOW_MARK_MAX));
+ if (flow->modifier & MLX5_FLOW_MOD_MARK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "mark action already present");
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "mark is not compatible with drop"
+ " action");
+ if (flow->modifier & MLX5_FLOW_MOD_FLAG) {
+ mlx5_flow_verbs_mark_update(verbs, mark->id);
+ size = 0;
+ } else if (size <= flow_size) {
+ tag.tag_id = mlx5_flow_mark_set(mark->id);
+ mlx5_flow_spec_verbs_add(flow, &tag, size);
+ }
+ flow->modifier |= MLX5_FLOW_MOD_MARK;
+ return size;
+}
+
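[Editorial aside — not part of the patch.] A hypothetical action list illustrating the branch above: when FLAG has already emitted a tag specification, a subsequent MARK rewrites that tag in place through mlx5_flow_verbs_mark_update() and consumes no extra space (size 0):

    struct rte_flow_action_mark mark = { .id = 42 };	/* < MLX5_FLOW_MARK_MAX */
    struct rte_flow_action_queue queue = { .index = 0 };
    const struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_FLAG },	/* emits one tag spec */
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },	/* rewrites it */
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };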
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned
+ * value is less than or equal to @p flow_size, the @p action has been
+ * fully converted; otherwise another call with the returned size should
+ * be made.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,
- struct mlx5_flow_parse *parser __rte_unused)
+mlx5_flow_action_count(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ const size_t flow_size __rte_unused,
+ struct rte_flow_error *error)
{
+ const struct rte_flow_action_count *count = action->conf;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- struct priv *priv = dev->data->dev_private;
unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
- struct ibv_counter_set_init_attr init_attr = {0};
struct ibv_flow_spec_counter_action counter = {
.type = IBV_FLOW_SPEC_ACTION_COUNT,
.size = size,
- .counter_set_handle = 0,
};
+#endif
- init_attr.counter_set_id = 0;
- parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr);
- if (!parser->cs) {
- rte_errno = EINVAL;
- return -rte_errno;
- }
- counter.counter_set_handle = parser->cs->handle;
- mlx5_flow_create_copy(parser, &counter, size);
+ if (!flow->counter) {
+ flow->counter = mlx5_flow_counter_new(dev, count->shared,
+ count->id);
+ if (!flow->counter)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "cannot get counter"
+ " context.");
+ }
+ if (!((struct priv *)dev->data->dev_private)->config.flow_counter_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flow counters are not supported.");
+ flow->modifier |= MLX5_FLOW_MOD_COUNT;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ counter.counter_set_handle = flow->counter->cs->handle;
+ if (size <= flow_size)
+ mlx5_flow_spec_verbs_add(flow, &counter, size);
+ return size;
#endif
return 0;
}
/**
- * Complete flow rule creation with a drop queue.
+ * Convert the @p action into @p flow after ensuring the NIC will understand
+ * and process it correctly.
+ * The conversion is performed action by action; each of them is written into
+ * @p flow if its size is less than or equal to @p flow_size.
+ * Validation and memory consumption computation are still performed until the
+ * end of @p actions, unless an error is encountered.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] actions
+ * Pointer to flow actions array.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small some
+ * garbage may be present.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned
+ * value is less than or equal to @p flow_size, the @p actions have been
+ * fully converted; otherwise another call with the returned size should
+ * be made.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_actions(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ size_t size = 0;
+ int remain = flow_size;
+ int ret = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ ret = mlx5_flow_action_flag(actions, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = mlx5_flow_action_mark(actions, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ ret = mlx5_flow_action_drop(actions, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_action_queue(dev, actions, flow, error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mlx5_flow_action_rss(dev, actions, flow, error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_action_count(dev, actions, flow, remain,
+ error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ if (ret < 0)
+ return ret;
+ if (remain > ret)
+ remain -= ret;
+ else
+ remain = 0;
+ size += ret;
+ }
+ if (!flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "no fate action found");
+ return size;
+}
+
+/**
+ * Validate flow rule and fill flow structure accordingly.
*
* @param dev
* Pointer to Ethernet device.
- * @param parser
- * Internal parser structure.
- * @param flow
- * Pointer to the rte_flow.
+ * @param[out] flow
+ * Pointer to flow structure.
+ * @param flow_size
+ * Size of allocated space for @p flow.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * A positive value representing the size of the flow object in bytes
+ * regardless of @p flow_size on success, a negative errno value otherwise
+ * and rte_errno is set.
*/
static int
-mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
- struct mlx5_flow_parse *parser,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+mlx5_flow_merge_switch(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ size_t flow_size,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- struct ibv_flow_spec_action_drop *drop;
- unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+ unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
+ uint16_t port_id[!n + n];
+ struct mlx5_nl_flow_ptoi ptoi[!n + n + 1];
+ size_t off = RTE_ALIGN_CEIL(sizeof(*flow), alignof(max_align_t));
+ unsigned int i;
+ unsigned int own = 0;
+ int ret;
- assert(priv->pd);
- assert(priv->ctx);
- flow->drop = 1;
- drop = (void *)((uintptr_t)parser->queue[HASH_RXQ_ETH].ibv_attr +
- parser->queue[HASH_RXQ_ETH].offset);
- *drop = (struct ibv_flow_spec_action_drop){
- .type = IBV_FLOW_SPEC_ACTION_DROP,
- .size = size,
- };
- ++parser->queue[HASH_RXQ_ETH].ibv_attr->num_of_specs;
- parser->queue[HASH_RXQ_ETH].offset += size;
- flow->frxq[HASH_RXQ_ETH].ibv_attr =
- parser->queue[HASH_RXQ_ETH].ibv_attr;
- if (parser->count)
- flow->cs = parser->cs;
- if (!dev->data->dev_started)
- return 0;
- parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
- flow->frxq[HASH_RXQ_ETH].ibv_flow =
- mlx5_glue->create_flow(priv->flow_drop_queue->qp,
- flow->frxq[HASH_RXQ_ETH].ibv_attr);
- if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "flow rule creation failure");
- goto error;
- }
- return 0;
-error:
- assert(flow);
- if (flow->frxq[HASH_RXQ_ETH].ibv_flow) {
- claim_zero(mlx5_glue->destroy_flow
- (flow->frxq[HASH_RXQ_ETH].ibv_flow));
- flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
- }
- if (flow->frxq[HASH_RXQ_ETH].ibv_attr) {
- rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);
- flow->frxq[HASH_RXQ_ETH].ibv_attr = NULL;
+ /* At least one port is needed when no switch domain is present. */
+ if (!n) {
+ n = 1;
+ port_id[0] = dev->data->port_id;
+ } else {
+ n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
+ }
+ for (i = 0; i != n; ++i) {
+ struct rte_eth_dev_info dev_info;
+
+ rte_eth_dev_info_get(port_id[i], &dev_info);
+ if (port_id[i] == dev->data->port_id)
+ own = i;
+ ptoi[i].port_id = port_id[i];
+ ptoi[i].ifindex = dev_info.if_index;
+ }
+ /* Ensure first entry of ptoi[] is the current device. */
+ if (own) {
+ ptoi[n] = ptoi[0];
+ ptoi[0] = ptoi[own];
+ ptoi[own] = ptoi[n];
+ }
+ /* An entry with zero ifindex terminates ptoi[]. */
+ ptoi[n].port_id = 0;
+ ptoi[n].ifindex = 0;
+ if (flow_size < off)
+ flow_size = 0;
+ ret = mlx5_nl_flow_transpose((uint8_t *)flow + off,
+ flow_size ? flow_size - off : 0,
+ ptoi, attr, pattern, actions, error);
+ if (ret < 0)
+ return ret;
+ if (flow_size) {
+ *flow = (struct rte_flow){
+ .attributes = *attr,
+ .nl_flow = (uint8_t *)flow + off,
+ };
+ /*
+ * Generate a reasonably unique handle based on the address
+ * of the target buffer.
+ *
+ * This is straightforward on 32-bit systems where the flow
+ * pointer can be used directly. Otherwise, its least
+ * significant part is taken after shifting it right by
+ * log2 of the previous power of two of the pointed
+ * buffer size.
+ */
+ if (sizeof(flow) <= 4)
+ mlx5_nl_flow_brand(flow->nl_flow, (uintptr_t)flow);
+ else
+ mlx5_nl_flow_brand
+ (flow->nl_flow,
+ (uintptr_t)flow >>
+ rte_log2_u32(rte_align32prevpow2(flow_size)));
}
- if (flow->cs) {
- claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
- flow->cs = NULL;
- parser->cs = NULL;
+ return off + ret;
+}
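[Editorial aside — not part of the patch.] To make the branding shift concrete, a worked example under the assumption of a 64-bit build and an illustrative 1000-byte buffer:

    #include <stdint.h>
    #include <rte_common.h>

    uint32_t flow_size = 1000;	/* hypothetical allocation size */
    uint32_t pow2 = rte_align32prevpow2(flow_size);	/* 512 */
    uint32_t shift = rte_log2_u32(pow2);	/* 9 */
    /* Handle branded into the message: (uintptr_t)flow >> 9. */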
+
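+/**
+ * Select the RSS expansion graph root matching @p pattern.
+ *
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] rss_level
+ * RSS level requested by the RSS action.
+ *
+ * @return
+ * Expansion graph root index for rte_flow_expand_rss().
+ */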
+static unsigned int
+mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
+{
+ const struct rte_flow_item *item;
+ unsigned int has_vlan = 0;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ has_vlan = 1;
+ break;
+ }
}
- return -rte_errno;
+ if (has_vlan)
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT :
+ MLX5_EXPANSION_ROOT_OUTER;
}
/**
- * Create hash Rx queues when RSS is enabled.
- *
- * @param dev
+ * Convert the @p attributes, @p pattern and @p actions into a flow for the
+ * NIC after ensuring the NIC will understand and process it correctly.
+ * The conversion is performed item by item and action by action; each of
+ * them is written into @p flow if its size is less than or equal to @p
+ * flow_size.
+ * Validation and memory consumption computation are still performed until the
+ * end, unless an error is encountered.
+ *
+ * @param[in] dev
* Pointer to Ethernet device.
- * @param parser
- * Internal parser structure.
- * @param flow
- * Pointer to the rte_flow.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow, if too small some
+ * garbage may be present.
+ * @param[in] attributes
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary. If the returned
+ * value is less than or equal to @p flow_size, the flow has been fully
+ * converted and can be applied; otherwise another call with the returned
+ * size should be made.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
- struct mlx5_flow_parse *parser,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const size_t flow_size,
+ const struct rte_flow_attr *attributes,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow local_flow = { .layers = 0, };
+ size_t size = sizeof(*flow);
+ union {
+ struct rte_flow_expand_rss buf;
+ uint8_t buffer[2048];
+ } expand_buffer;
+ struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+ struct mlx5_flow_verbs *original_verbs = NULL;
+ size_t original_verbs_size = 0;
+ uint32_t original_layers = 0;
+ int expanded_pattern_idx = 0;
+ int ret;
+ uint32_t i;
+
+ if (attributes->transfer)
+ return mlx5_flow_merge_switch(dev, flow, flow_size,
+ attributes, pattern,
+ actions, error);
+ if (size > flow_size)
+ flow = &local_flow;
+ ret = mlx5_flow_attributes(dev, attributes, flow, error);
+ if (ret < 0)
+ return ret;
+ ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);
+ if (ret < 0)
+ return ret;
+ if (local_flow.rss.types) {
+ unsigned int graph_root;
+
+ graph_root = mlx5_find_graph_root(pattern,
+ local_flow.rss.level);
+ ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
+ pattern, local_flow.rss.types,
+ mlx5_support_expansion,
+ graph_root);
+ assert(ret > 0 &&
+ (unsigned int)ret < sizeof(expand_buffer.buffer));
+ } else {
+ buf->entries = 1;
+ buf->entry[0].pattern = (void *)(uintptr_t)pattern;
+ }
+ size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),
+ sizeof(void *));
+ if (size <= flow_size)
+ flow->queue = (void *)(flow + 1);
+ LIST_INIT(&flow->verbs);
+ flow->layers = 0;
+ flow->modifier = 0;
+ flow->fate = 0;
+ for (i = 0; i != buf->entries; ++i) {
+ size_t off = size;
+ size_t off2;
+
+ flow->layers = original_layers;
+ size += sizeof(struct ibv_flow_attr) +
+ sizeof(struct mlx5_flow_verbs);
+ off2 = size;
+ if (size < flow_size) {
+ flow->cur_verbs = (void *)((uintptr_t)flow + off);
+ flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
+ flow->cur_verbs->specs =
+ (void *)(flow->cur_verbs->attr + 1);
+ }
+ /* First iteration convert the pattern into Verbs. */
+ if (i == 0) {
+ /* Actions don't need to be converted several times. */
+ ret = mlx5_flow_actions(dev, actions, flow,
+ (size < flow_size) ?
+ flow_size - size : 0,
+ error);
+ if (ret < 0)
+ return ret;
+ size += ret;
+ } else {
+ /*
+ * Next iteration means the pattern has already been
+ * converted and an expansion is necessary to match
+ * the user RSS request. For that, only the expanded
+ * items will be converted; the part common with the
+ * user pattern is just copied into the next buffer
+ * zone.
+ */
+ size += original_verbs_size;
+ if (size < flow_size) {
+ rte_memcpy(flow->cur_verbs->attr,
+ original_verbs->attr,
+ original_verbs_size +
+ sizeof(struct ibv_flow_attr));
+ flow->cur_verbs->size = original_verbs_size;
+ }
+ }
+ ret = mlx5_flow_items
+ (dev,
+ (const struct rte_flow_item *)
+ &buf->entry[i].pattern[expanded_pattern_idx],
+ flow,
+ (size < flow_size) ? flow_size - size : 0, error);
+ if (ret < 0)
+ return ret;
+ size += ret;
+ if (size <= flow_size) {
+ mlx5_flow_adjust_priority(dev, flow);
+ LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
+ }
+ /*
+ * Keep a pointer to the first Verbs conversion and the layers
+ * it has encountered.
+ */
+ if (i == 0) {
+ original_verbs = flow->cur_verbs;
+ original_verbs_size = size - off2;
+ original_layers = flow->layers;
+ /*
+ * Move the index of the expanded pattern to the
+ * first item not addressed yet.
+ */
+ if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
+ expanded_pattern_idx++;
+ } else {
+ const struct rte_flow_item *item = pattern;
+
+ for (item = pattern;
+ item->type != RTE_FLOW_ITEM_TYPE_END;
+ ++item)
+ expanded_pattern_idx++;
+ }
+ }
+ }
+ /* Restore the original layers in the flow. */
+ flow->layers = original_layers;
+ return size;
+}
+
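[Editorial aside — not part of the patch.] The calling convention this enables is visible in mlx5_flow_list_create() further below; in outline (a sketch, not new driver code):

    /* First pass: no buffer, compute the required size. */
    int size = mlx5_flow_merge(dev, NULL, 0, attr, pattern, actions, &error);
    if (size < 0)
        return size;
    struct rte_flow *flow = rte_calloc(__func__, 1, size, 0);
    /* Second pass: same arguments, this time filling the allocation. */
    if (flow)
        size = mlx5_flow_merge(dev, flow, size, attr, pattern, actions,
                               &error);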
+/**
+ * Look up and set the tunnel ptype in the Rx queue data. Only a single ptype
+ * can be used; if several tunnel rules are used on this queue, the tunnel
+ * ptype is cleared.
+ *
+ * @param rxq_ctrl
+ * Rx queue to update.
+ */
+static void
+mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
unsigned int i;
+ uint32_t tunnel_ptype = 0;
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!parser->queue[i].ibv_attr)
+ /* Look up the ptype to use. */
+ for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
+ if (!rxq_ctrl->flow_tunnels_n[i])
continue;
- flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr;
- parser->queue[i].ibv_attr = NULL;
- flow->frxq[i].hash_fields = parser->queue[i].hash_fields;
- if (!dev->data->dev_started)
- continue;
- flow->frxq[i].hrxq =
- mlx5_hrxq_get(dev,
- parser->rss_conf.key,
- parser->rss_conf.key_len,
- flow->frxq[i].hash_fields,
- parser->rss_conf.queue,
- parser->rss_conf.queue_num,
- parser->tunnel,
- parser->rss_conf.level);
- if (flow->frxq[i].hrxq)
- continue;
- flow->frxq[i].hrxq =
- mlx5_hrxq_new(dev,
- parser->rss_conf.key,
- parser->rss_conf.key_len,
- flow->frxq[i].hash_fields,
- parser->rss_conf.queue,
- parser->rss_conf.queue_num,
- parser->tunnel,
- parser->rss_conf.level);
- if (!flow->frxq[i].hrxq) {
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL,
- "cannot create hash rxq");
+ if (!tunnel_ptype) {
+ tunnel_ptype = tunnels_info[i].ptype;
+ } else {
+ tunnel_ptype = 0;
+ break;
}
}
- return 0;
+ rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
/**
- * RXQ update after flow rule creation.
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
*
- * @param dev
+ * @param[in] dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to the flow rule.
+ * @param[in] flow
+ * Pointer to flow structure.
*/
static void
-mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow)
+mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct priv *priv = dev->data->dev_private;
+ const int mark = !!(flow->modifier &
+ (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
unsigned int i;
- unsigned int j;
- if (!dev->data->dev_started)
- return;
- for (i = 0; i != flow->rss_conf.queue_num; ++i) {
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)
- [(*flow->queues)[i]];
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- uint8_t tunnel = PTYPE_IDX(flow->tunnel);
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
- rxq_data->mark |= flow->mark;
- if (!tunnel)
- continue;
- rxq_ctrl->tunnel_types[tunnel] += 1;
- /* Clear tunnel type if more than one tunnel types set. */
- for (j = 0; j != RTE_DIM(rxq_ctrl->tunnel_types); ++j) {
- if (j == tunnel)
- continue;
- if (rxq_ctrl->tunnel_types[j] > 0) {
- rxq_data->tunnel = 0;
- break;
+ if (mark) {
+ rxq_ctrl->rxq.mark = 1;
+ rxq_ctrl->flow_mark_n++;
+ }
+ if (tunnel) {
+ unsigned int j;
+
+ /* Increase the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel & flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]++;
+ break;
+ }
}
+ mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
}
- if (j == RTE_DIM(rxq_ctrl->tunnel_types))
- rxq_data->tunnel = flow->tunnel;
}
}
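[Editorial aside — not part of the patch.] A hypothetical trace of two marked flows (flow_a, flow_b) sharing one Rx queue shows how the reference counter keeps rxq.mark set until the last user is gone (mlx5_flow_rxq_flags_trim() is defined just below):

    mlx5_flow_rxq_flags_set(dev, flow_a);	/* flow_mark_n = 1, rxq.mark = 1 */
    mlx5_flow_rxq_flags_set(dev, flow_b);	/* flow_mark_n = 2 */
    mlx5_flow_rxq_flags_trim(dev, flow_a);	/* flow_mark_n = 1, mark still 1 */
    mlx5_flow_rxq_flags_trim(dev, flow_b);	/* flow_mark_n = 0, rxq.mark = 0 */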
/**
- * Dump flow hash RX queue detail.
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with
+ * @p flow if no other flow uses them with the same kind of request.
*
* @param dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to the rte_flow.
- * @param hrxq_idx
- * Hash RX queue index.
+ * @param[in] flow
+ * Pointer to the flow.
*/
static void
-mlx5_flow_dump(struct rte_eth_dev *dev __rte_unused,
- struct rte_flow *flow __rte_unused,
- unsigned int hrxq_idx __rte_unused)
+mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
-#ifndef NDEBUG
- uintptr_t spec_ptr;
- uint16_t j;
- char buf[256];
- uint8_t off;
- uint64_t extra_hash_fields = 0;
+ struct priv *priv = dev->data->dev_private;
+ const int mark = !!(flow->modifier &
+ (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (flow->tunnel && flow->rss_conf.level > 1)
- extra_hash_fields = (uint32_t)IBV_RX_HASH_INNER;
-#endif
- spec_ptr = (uintptr_t)(flow->frxq[hrxq_idx].ibv_attr + 1);
- for (j = 0, off = 0; j < flow->frxq[hrxq_idx].ibv_attr->num_of_specs;
- j++) {
- struct ibv_flow_spec *spec = (void *)spec_ptr;
- off += sprintf(buf + off, " %x(%hu)", spec->hdr.type,
- spec->hdr.size);
- spec_ptr += spec->hdr.size;
- }
- DRV_LOG(DEBUG,
- "port %u Verbs flow %p type %u: hrxq:%p qp:%p ind:%p,"
- " hash:%" PRIx64 "/%u specs:%hhu(%hu), priority:%hu, type:%d,"
- " flags:%x, comp_mask:%x specs:%s",
- dev->data->port_id, (void *)flow, hrxq_idx,
- (void *)flow->frxq[hrxq_idx].hrxq,
- (void *)flow->frxq[hrxq_idx].hrxq->qp,
- (void *)flow->frxq[hrxq_idx].hrxq->ind_table,
- (flow->frxq[hrxq_idx].hash_fields | extra_hash_fields),
- flow->rss_conf.queue_num,
- flow->frxq[hrxq_idx].ibv_attr->num_of_specs,
- flow->frxq[hrxq_idx].ibv_attr->size,
- flow->frxq[hrxq_idx].ibv_attr->priority,
- flow->frxq[hrxq_idx].ibv_attr->type,
- flow->frxq[hrxq_idx].ibv_attr->flags,
- flow->frxq[hrxq_idx].ibv_attr->comp_mask,
- buf);
-#endif
+ assert(dev->data->dev_started);
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ if (mark) {
+ rxq_ctrl->flow_mark_n--;
+ rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
+ }
+ if (tunnel) {
+ unsigned int j;
+
+ /* Decrease the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel & flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]--;
+ break;
+ }
+ }
+ mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
+ }
+ }
}
/**
- * Complete flow rule creation.
+ * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
*
* @param dev
* Pointer to Ethernet device.
- * @param parser
- * Internal parser structure.
- * @param flow
- * Pointer to the rte_flow.
+ */
+static void
+mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ unsigned int j;
+
+ if (!(*priv->rxqs)[i])
+ continue;
+ rxq_ctrl = container_of((*priv->rxqs)[i],
+ struct mlx5_rxq_ctrl, rxq);
+ rxq_ctrl->flow_mark_n = 0;
+ rxq_ctrl->rxq.mark = 0;
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+ rxq_ctrl->flow_tunnels_n[j] = 0;
+ rxq_ctrl->rxq.tunnel = 0;
+ }
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
+
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Remove the flow.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ */
+static void
+mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_verbs *verbs;
+
+ if (flow->nl_flow && priv->mnl_socket)
+ mlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL);
+ LIST_FOREACH(verbs, &flow->verbs, next) {
+ if (verbs->flow) {
+ claim_zero(mlx5_glue->destroy_flow(verbs->flow));
+ verbs->flow = NULL;
+ }
+ if (verbs->hrxq) {
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, verbs->hrxq);
+ verbs->hrxq = NULL;
+ }
+ }
+ if (flow->counter) {
+ mlx5_flow_counter_release(flow->counter);
+ flow->counter = NULL;
+ }
+}
+
+/**
+ * Apply the flow.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
* @param[out] error
- * Perform verbose error reporting if not NULL.
+ * Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
- struct mlx5_flow_parse *parser,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
{
- struct priv *priv __rte_unused = dev->data->dev_private;
- int ret;
- unsigned int i;
- unsigned int flows_n = 0;
-
- assert(priv->pd);
- assert(priv->ctx);
- assert(!parser->drop);
- ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error);
- if (ret)
- goto error;
- if (parser->count)
- flow->cs = parser->cs;
- if (!dev->data->dev_started)
- return 0;
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!flow->frxq[i].hrxq)
- continue;
- flow->frxq[i].ibv_flow =
- mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
- flow->frxq[i].ibv_attr);
- mlx5_flow_dump(dev, flow, i);
- if (!flow->frxq[i].ibv_flow) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "flow rule creation failure");
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_verbs *verbs;
+ int err;
+
+ LIST_FOREACH(verbs, &flow->verbs, next) {
+ if (flow->fate & MLX5_FLOW_FATE_DROP) {
+ verbs->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!verbs->hrxq) {
+ rte_flow_error_set
+ (error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot get drop hash queue");
+ goto error;
+ }
+ } else {
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_hrxq_get(dev, flow->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ verbs->hash_fields,
+ (*flow->queue),
+ flow->rss.queue_num);
+ if (!hrxq)
+ hrxq = mlx5_hrxq_new(dev, flow->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ verbs->hash_fields,
+ (*flow->queue),
+ flow->rss.queue_num,
+ !!(flow->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
+ if (!hrxq) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot get hash queue");
+ goto error;
+ }
+ verbs->hrxq = hrxq;
+ }
+ verbs->flow =
+ mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
+ if (!verbs->flow) {
+ rte_flow_error_set(error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "hardware refuses to create flow");
goto error;
}
- ++flows_n;
}
- if (!flows_n) {
- rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "internal error in flow creation");
+ if (flow->nl_flow &&
+ priv->mnl_socket &&
+ mlx5_nl_flow_create(priv->mnl_socket, flow->nl_flow, error))
goto error;
- }
- mlx5_flow_create_update_rxqs(dev, flow);
return 0;
error:
- ret = rte_errno; /* Save rte_errno before cleanup. */
- assert(flow);
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (flow->frxq[i].ibv_flow) {
- struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow;
-
- claim_zero(mlx5_glue->destroy_flow(ibv_flow));
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ LIST_FOREACH(verbs, &flow->verbs, next) {
+ if (verbs->hrxq) {
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, verbs->hrxq);
+ verbs->hrxq = NULL;
}
- if (flow->frxq[i].hrxq)
- mlx5_hrxq_release(dev, flow->frxq[i].hrxq);
- if (flow->frxq[i].ibv_attr)
- rte_free(flow->frxq[i].ibv_attr);
- }
- if (flow->cs) {
- claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
- flow->cs = NULL;
- parser->cs = NULL;
}
- rte_errno = ret; /* Restore rte_errno. */
+ rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}
/**
- * Convert a flow.
+ * Create a flow and add it to @p list.
*
* @param dev
* Pointer to Ethernet device.
@@ -2441,7 +2998,7 @@ error:
* Pointer to a TAILQ flow list.
* @param[in] attr
* Flow rule attributes.
- * @param[in] pattern
+ * @param[in] items
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
@@ -2459,81 +3016,43 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct mlx5_flow_parse parser = { .create = 1, };
struct rte_flow *flow = NULL;
- unsigned int i;
+ size_t size = 0;
int ret;
- ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
- if (ret)
- goto exit;
- flow = rte_calloc(__func__, 1,
- sizeof(*flow) +
- parser.rss_conf.queue_num * sizeof(uint16_t),
- 0);
+ ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
+ if (ret < 0)
+ return NULL;
+ size = ret;
+ flow = rte_calloc(__func__, 1, size, 0);
if (!flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
- "cannot allocate flow memory");
+ "not enough memory to create flow");
return NULL;
}
- /* Copy configuration. */
- flow->queues = (uint16_t (*)[])(flow + 1);
- flow->tunnel = parser.tunnel;
- flow->rss_conf = (struct rte_flow_action_rss){
- .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
- .level = parser.rss_conf.level,
- .types = parser.rss_conf.types,
- .key_len = parser.rss_conf.key_len,
- .queue_num = parser.rss_conf.queue_num,
- .key = memcpy(flow->rss_key, parser.rss_conf.key,
- sizeof(*parser.rss_conf.key) *
- parser.rss_conf.key_len),
- .queue = memcpy(flow->queues, parser.rss_conf.queue,
- sizeof(*parser.rss_conf.queue) *
- parser.rss_conf.queue_num),
- };
- flow->mark = parser.mark;
- /* finalise the flow. */
- if (parser.drop)
- ret = mlx5_flow_create_action_queue_drop(dev, &parser, flow,
- error);
- else
- ret = mlx5_flow_create_action_queue(dev, &parser, flow, error);
- if (ret)
- goto exit;
+ ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
+ if (ret < 0) {
+ rte_free(flow);
+ return NULL;
+ }
+ assert((size_t)ret == size);
+ if (dev->data->dev_started) {
+ ret = mlx5_flow_apply(dev, flow, error);
+ if (ret < 0) {
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (flow) {
+ mlx5_flow_remove(dev, flow);
+ rte_free(flow);
+ }
+ rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
+ }
+ }
TAILQ_INSERT_TAIL(list, flow, next);
- DRV_LOG(DEBUG, "port %u flow created %p", dev->data->port_id,
- (void *)flow);
+ mlx5_flow_rxq_flags_set(dev, flow);
return flow;
-exit:
- DRV_LOG(ERR, "port %u flow creation error: %s", dev->data->port_id,
- error->message);
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (parser.queue[i].ibv_attr)
- rte_free(parser.queue[i].ibv_attr);
- }
- rte_free(flow);
- return NULL;
-}
-
-/**
- * Validate a flow supported by the NIC.
- *
- * @see rte_flow_validate()
- * @see rte_flow_ops
- */
-int
-mlx5_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct mlx5_flow_parse parser = { .create = 0, };
-
- return mlx5_flow_convert(dev, attr, items, actions, error, &parser);
}
/**
@@ -2549,10 +3068,9 @@ mlx5_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
-
- return mlx5_flow_list_create(dev, &priv->flows, attr, items, actions,
- error);
+ return mlx5_flow_list_create
+ (dev, &((struct priv *)dev->data->dev_private)->flows,
+ attr, items, actions, error);
}
/**
@@ -2569,95 +3087,14 @@ static void
mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
struct rte_flow *flow)
{
- struct priv *priv = dev->data->dev_private;
- unsigned int i;
-
- if (flow->drop || !dev->data->dev_started)
- goto free;
- for (i = 0; flow->tunnel && i != flow->rss_conf.queue_num; ++i) {
- /* Update queue tunnel type. */
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)
- [(*flow->queues)[i]];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- uint8_t tunnel = PTYPE_IDX(flow->tunnel);
-
- assert(rxq_ctrl->tunnel_types[tunnel] > 0);
- rxq_ctrl->tunnel_types[tunnel] -= 1;
- if (!rxq_ctrl->tunnel_types[tunnel]) {
- /* Update tunnel type. */
- uint8_t j;
- uint8_t types = 0;
- uint8_t last;
-
- for (j = 0; j < RTE_DIM(rxq_ctrl->tunnel_types); j++)
- if (rxq_ctrl->tunnel_types[j]) {
- types += 1;
- last = j;
- }
- /* Keep same if more than one tunnel types left. */
- if (types == 1)
- rxq_data->tunnel = ptype_ext[last];
- else if (types == 0)
- /* No tunnel type left. */
- rxq_data->tunnel = 0;
- }
- }
- for (i = 0; flow->mark && i != flow->rss_conf.queue_num; ++i) {
- struct rte_flow *tmp;
- int mark = 0;
-
- /*
- * To remove the mark from the queue, the queue must not be
- * present in any other marked flow (RSS or not).
- */
- TAILQ_FOREACH(tmp, list, next) {
- unsigned int j;
- uint16_t *tqs = NULL;
- uint16_t tq_n = 0;
-
- if (!tmp->mark)
- continue;
- for (j = 0; j != hash_rxq_init_n; ++j) {
- if (!tmp->frxq[j].hrxq)
- continue;
- tqs = tmp->frxq[j].hrxq->ind_table->queues;
- tq_n = tmp->frxq[j].hrxq->ind_table->queues_n;
- }
- if (!tq_n)
- continue;
- for (j = 0; (j != tq_n) && !mark; j++)
- if (tqs[j] == (*flow->queues)[i])
- mark = 1;
- }
- (*priv->rxqs)[(*flow->queues)[i]]->mark = mark;
- }
-free:
- if (flow->drop) {
- if (flow->frxq[HASH_RXQ_ETH].ibv_flow)
- claim_zero(mlx5_glue->destroy_flow
- (flow->frxq[HASH_RXQ_ETH].ibv_flow));
- rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);
- } else {
- for (i = 0; i != hash_rxq_init_n; ++i) {
- struct mlx5_flow *frxq = &flow->frxq[i];
-
- if (frxq->ibv_flow)
- claim_zero(mlx5_glue->destroy_flow
- (frxq->ibv_flow));
- if (frxq->hrxq)
- mlx5_hrxq_release(dev, frxq->hrxq);
- if (frxq->ibv_attr)
- rte_free(frxq->ibv_attr);
- }
- }
- if (flow->cs) {
- claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
- flow->cs = NULL;
- }
+ mlx5_flow_remove(dev, flow);
TAILQ_REMOVE(list, flow, next);
- DRV_LOG(DEBUG, "port %u flow destroyed %p", dev->data->port_id,
- (void *)flow);
+ /*
+ * Update Rx queue flags only if the port is started; otherwise they
+ * are already clear.
+ */
+ if (dev->data->dev_started)
+ mlx5_flow_rxq_flags_trim(dev, flow);
rte_free(flow);
}
@@ -2681,135 +3118,6 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
}
/**
- * Create drop queue.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_hrxq_drop *fdq = NULL;
-
- assert(priv->pd);
- assert(priv->ctx);
- fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
- if (!fdq) {
- DRV_LOG(WARNING,
- "port %u cannot allocate memory for drop queue",
- dev->data->port_id);
- rte_errno = ENOMEM;
- return -rte_errno;
- }
- fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
- if (!fdq->cq) {
- DRV_LOG(WARNING, "port %u cannot allocate CQ for drop queue",
- dev->data->port_id);
- rte_errno = errno;
- goto error;
- }
- fdq->wq = mlx5_glue->create_wq
- (priv->ctx,
- &(struct ibv_wq_init_attr){
- .wq_type = IBV_WQT_RQ,
- .max_wr = 1,
- .max_sge = 1,
- .pd = priv->pd,
- .cq = fdq->cq,
- });
- if (!fdq->wq) {
- DRV_LOG(WARNING, "port %u cannot allocate WQ for drop queue",
- dev->data->port_id);
- rte_errno = errno;
- goto error;
- }
- fdq->ind_table = mlx5_glue->create_rwq_ind_table
- (priv->ctx,
- &(struct ibv_rwq_ind_table_init_attr){
- .log_ind_tbl_size = 0,
- .ind_tbl = &fdq->wq,
- .comp_mask = 0,
- });
- if (!fdq->ind_table) {
- DRV_LOG(WARNING,
- "port %u cannot allocate indirection table for drop"
- " queue",
- dev->data->port_id);
- rte_errno = errno;
- goto error;
- }
- fdq->qp = mlx5_glue->create_qp_ex
- (priv->ctx,
- &(struct ibv_qp_init_attr_ex){
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_QP_INIT_ATTR_PD |
- IBV_QP_INIT_ATTR_IND_TABLE |
- IBV_QP_INIT_ATTR_RX_HASH,
- .rx_hash_conf = (struct ibv_rx_hash_conf){
- .rx_hash_function =
- IBV_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = rss_hash_default_key_len,
- .rx_hash_key = rss_hash_default_key,
- .rx_hash_fields_mask = 0,
- },
- .rwq_ind_tbl = fdq->ind_table,
- .pd = priv->pd
- });
- if (!fdq->qp) {
- DRV_LOG(WARNING, "port %u cannot allocate QP for drop queue",
- dev->data->port_id);
- rte_errno = errno;
- goto error;
- }
- priv->flow_drop_queue = fdq;
- return 0;
-error:
- if (fdq->qp)
- claim_zero(mlx5_glue->destroy_qp(fdq->qp));
- if (fdq->ind_table)
- claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
- if (fdq->wq)
- claim_zero(mlx5_glue->destroy_wq(fdq->wq));
- if (fdq->cq)
- claim_zero(mlx5_glue->destroy_cq(fdq->cq));
- if (fdq)
- rte_free(fdq);
- priv->flow_drop_queue = NULL;
- return -rte_errno;
-}
-
-/**
- * Delete drop queue.
- *
- * @param dev
- * Pointer to Ethernet device.
- */
-void
-mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
-
- if (!fdq)
- return;
- if (fdq->qp)
- claim_zero(mlx5_glue->destroy_qp(fdq->qp));
- if (fdq->ind_table)
- claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
- if (fdq->wq)
- claim_zero(mlx5_glue->destroy_wq(fdq->wq));
- if (fdq->cq)
- claim_zero(mlx5_glue->destroy_cq(fdq->cq));
- rte_free(fdq);
- priv->flow_drop_queue = NULL;
-}
-
-/**
* Remove all flows.
*
* @param dev
@@ -2820,68 +3128,11 @@ mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev)
void
mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
- struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
- unsigned int i;
- TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
- struct mlx5_ind_table_ibv *ind_tbl = NULL;
-
- if (flow->drop) {
- if (!flow->frxq[HASH_RXQ_ETH].ibv_flow)
- continue;
- claim_zero(mlx5_glue->destroy_flow
- (flow->frxq[HASH_RXQ_ETH].ibv_flow));
- flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
- DRV_LOG(DEBUG, "port %u flow %p removed",
- dev->data->port_id, (void *)flow);
- /* Next flow. */
- continue;
- }
- /* Verify the flow has not already been cleaned. */
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!flow->frxq[i].ibv_flow)
- continue;
- /*
- * Indirection table may be necessary to remove the
- * flags in the Rx queues.
- * This helps to speed-up the process by avoiding
- * another loop.
- */
- ind_tbl = flow->frxq[i].hrxq->ind_table;
- break;
- }
- if (i == hash_rxq_init_n)
- return;
- if (flow->mark) {
- assert(ind_tbl);
- for (i = 0; i != ind_tbl->queues_n; ++i)
- (*priv->rxqs)[ind_tbl->queues[i]]->mark = 0;
- }
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!flow->frxq[i].ibv_flow)
- continue;
- claim_zero(mlx5_glue->destroy_flow
- (flow->frxq[i].ibv_flow));
- flow->frxq[i].ibv_flow = NULL;
- mlx5_hrxq_release(dev, flow->frxq[i].hrxq);
- flow->frxq[i].hrxq = NULL;
- }
- DRV_LOG(DEBUG, "port %u flow %p removed", dev->data->port_id,
- (void *)flow);
- }
- /* Cleanup Rx queue tunnel info. */
- for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *q = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(q, struct mlx5_rxq_ctrl, rxq);
-
- if (!q)
- continue;
- memset((void *)rxq_ctrl->tunnel_types, 0,
- sizeof(rxq_ctrl->tunnel_types));
- q->tunnel = 0;
- }
+ TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
+ mlx5_flow_remove(dev, flow);
+ mlx5_flow_rxq_flags_clear(dev);
}
/**
@@ -2898,75 +3149,22 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
int
mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
- struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
+ struct rte_flow_error error;
+ int ret = 0;
TAILQ_FOREACH(flow, list, next) {
- unsigned int i;
-
- if (flow->drop) {
- flow->frxq[HASH_RXQ_ETH].ibv_flow =
- mlx5_glue->create_flow
- (priv->flow_drop_queue->qp,
- flow->frxq[HASH_RXQ_ETH].ibv_attr);
- if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
- DRV_LOG(DEBUG,
- "port %u flow %p cannot be applied",
- dev->data->port_id, (void *)flow);
- rte_errno = EINVAL;
- return -rte_errno;
- }
- DRV_LOG(DEBUG, "port %u flow %p applied",
- dev->data->port_id, (void *)flow);
- /* Next flow. */
- continue;
- }
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!flow->frxq[i].ibv_attr)
- continue;
- flow->frxq[i].hrxq =
- mlx5_hrxq_get(dev, flow->rss_conf.key,
- flow->rss_conf.key_len,
- flow->frxq[i].hash_fields,
- flow->rss_conf.queue,
- flow->rss_conf.queue_num,
- flow->tunnel,
- flow->rss_conf.level);
- if (flow->frxq[i].hrxq)
- goto flow_create;
- flow->frxq[i].hrxq =
- mlx5_hrxq_new(dev, flow->rss_conf.key,
- flow->rss_conf.key_len,
- flow->frxq[i].hash_fields,
- flow->rss_conf.queue,
- flow->rss_conf.queue_num,
- flow->tunnel,
- flow->rss_conf.level);
- if (!flow->frxq[i].hrxq) {
- DRV_LOG(DEBUG,
- "port %u flow %p cannot create hash"
- " rxq",
- dev->data->port_id, (void *)flow);
- rte_errno = EINVAL;
- return -rte_errno;
- }
-flow_create:
- mlx5_flow_dump(dev, flow, i);
- flow->frxq[i].ibv_flow =
- mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
- flow->frxq[i].ibv_attr);
- if (!flow->frxq[i].ibv_flow) {
- DRV_LOG(DEBUG,
- "port %u flow %p type %u cannot be"
- " applied",
- dev->data->port_id, (void *)flow, i);
- rte_errno = EINVAL;
- return -rte_errno;
- }
- }
- mlx5_flow_create_update_rxqs(dev, flow);
+ ret = mlx5_flow_apply(dev, flow, &error);
+ if (ret < 0)
+ goto error;
+ mlx5_flow_rxq_flags_set(dev, flow);
}
return 0;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_flow_stop(dev, list);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -3019,7 +3217,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
const struct rte_flow_attr attr = {
.ingress = 1,
- .priority = MLX5_CTRL_FLOW_PRIORITY,
+ .priority = MLX5_FLOW_PRIO_RSVD,
};
struct rte_flow_item items[] = {
{
@@ -3129,49 +3327,88 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
return 0;
}
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+/**
+ * Isolated mode.
+ *
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ if (dev->data->dev_started) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "port must be stopped first");
+ return -rte_errno;
+ }
+ priv->isolated = !!enable;
+ if (enable)
+ dev->dev_ops = &mlx5_dev_ops_isolate;
+ else
+ dev->dev_ops = &mlx5_dev_ops;
+ return 0;
+}
+
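[Editorial aside — not part of the patch.] Usage from an application, as a hedged sketch (port_id is hypothetical); the EBUSY check above is why the port must be stopped first:

    struct rte_flow_error error;

    rte_eth_dev_stop(port_id);
    if (rte_flow_isolate(port_id, 1, &error))	/* enter isolated mode */
        printf("isolate failed: %s\n", error.message);
    rte_eth_dev_start(port_id);	/* return value ignored for brevity */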
/**
* Query flow counter.
*
- * @param cs
- * the counter set.
- * @param counter_value
- * returned data from the counter.
+ * @param flow
+ * Pointer to the flow.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_flow_query_count(struct ibv_counter_set *cs,
- struct mlx5_flow_counter_stats *counter_stats,
- struct rte_flow_query_count *query_count,
+mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
+ void *data __rte_unused,
struct rte_flow_error *error)
{
- uint64_t counters[2];
- struct ibv_query_counter_set_attr query_cs_attr = {
- .cs = cs,
- .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
- };
- struct ibv_counter_set_data query_out = {
- .out = counters,
- .outlen = 2 * sizeof(uint64_t),
- };
- int err = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);
-
- if (err)
- return rte_flow_error_set(error, err,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot read counter");
- query_count->hits_set = 1;
- query_count->bytes_set = 1;
- query_count->hits = counters[0] - counter_stats->hits;
- query_count->bytes = counters[1] - counter_stats->bytes;
- if (query_count->reset) {
- counter_stats->hits = counters[0];
- counter_stats->bytes = counters[1];
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ if (flow->modifier & MLX5_FLOW_MOD_COUNT) {
+ struct rte_flow_query_count *qc = data;
+ uint64_t counters[2] = {0, 0};
+ struct ibv_query_counter_set_attr query_cs_attr = {
+ .cs = flow->counter->cs,
+ .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
+ };
+ struct ibv_counter_set_data query_out = {
+ .out = counters,
+ .outlen = 2 * sizeof(uint64_t),
+ };
+ int err = mlx5_glue->query_counter_set(&query_cs_attr,
+ &query_out);
+
+ if (err)
+ return rte_flow_error_set
+ (error, err,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
+ qc->hits_set = 1;
+ qc->bytes_set = 1;
+ qc->hits = counters[0] - flow->counter->hits;
+ qc->bytes = counters[1] - flow->counter->bytes;
+ if (qc->reset) {
+ flow->counter->hits = counters[0];
+ flow->counter->bytes = counters[1];
+ }
+ return 0;
}
- return 0;
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "flow does not have counter");
+#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "counters are not available");
}
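[Editorial aside — not part of the patch.] Querying this path from an application could look as follows (a sketch; port_id and flow are assumed to exist, <stdio.h> and <inttypes.h> are included, and the action list is END-terminated as the loop in mlx5_flow_query() below expects):

    struct rte_flow_query_count qc = { .reset = 1 };	/* read-and-reset */
    const struct rte_flow_action query[] = {
        { .type = RTE_FLOW_ACTION_TYPE_COUNT },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error error;

    if (!rte_flow_query(port_id, flow, query, &qc, &error))
        printf("hits %" PRIu64 " bytes %" PRIu64 "\n", qc.hits, qc.bytes);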
/**
@@ -3183,54 +3420,28 @@ mlx5_flow_query_count(struct ibv_counter_set *cs,
int
mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
struct rte_flow *flow,
- const struct rte_flow_action *action __rte_unused,
+ const struct rte_flow_action *actions,
void *data,
struct rte_flow_error *error)
{
- if (flow->cs) {
- int ret;
+ int ret = 0;
- ret = mlx5_flow_query_count(flow->cs,
- &flow->counter_stats,
- (struct rte_flow_query_count *)data,
- error);
- if (ret)
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_query_count(flow, data, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ if (ret < 0)
return ret;
- } else {
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "no counter found for flow");
- }
- return 0;
-}
-#endif
-
-/**
- * Isolated mode.
- *
- * @see rte_flow_isolate()
- * @see rte_flow_ops
- */
-int
-mlx5_flow_isolate(struct rte_eth_dev *dev,
- int enable,
- struct rte_flow_error *error)
-{
- struct priv *priv = dev->data->dev_private;
-
- if (dev->data->dev_started) {
- rte_flow_error_set(error, EBUSY,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "port must be stopped first");
- return -rte_errno;
}
- priv->isolated = !!enable;
- if (enable)
- dev->dev_ops = &mlx5_dev_ops_isolate;
- else
- dev->dev_ops = &mlx5_dev_ops;
return 0;
}
@@ -3445,9 +3656,6 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
.type = 0,
},
};
- struct mlx5_flow_parse parser = {
- .layer = HASH_RXQ_ETH,
- };
struct rte_flow_error error;
struct rte_flow *flow;
int ret;
@@ -3455,10 +3663,6 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
if (ret)
return ret;
- ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
- attributes.actions, &error, &parser);
- if (ret)
- return ret;
flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
attributes.items, attributes.actions,
&error);
@@ -3482,94 +3686,11 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter)
+mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_eth_fdir_filter *fdir_filter
+ __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_fdir attributes = {
- .attr.group = 0,
- };
- struct mlx5_flow_parse parser = {
- .create = 1,
- .layer = HASH_RXQ_ETH,
- };
- struct rte_flow_error error;
- struct rte_flow *flow;
- unsigned int i;
- int ret;
-
- ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
- if (ret)
- return ret;
- ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
- attributes.actions, &error, &parser);
- if (ret)
- goto exit;
- /*
- * Special case for drop action which is only set in the
- * specifications when the flow is created. In this situation the
- * drop specification is missing.
- */
- if (parser.drop) {
- struct ibv_flow_spec_action_drop *drop;
-
- drop = (void *)((uintptr_t)parser.queue[HASH_RXQ_ETH].ibv_attr +
- parser.queue[HASH_RXQ_ETH].offset);
- *drop = (struct ibv_flow_spec_action_drop){
- .type = IBV_FLOW_SPEC_ACTION_DROP,
- .size = sizeof(struct ibv_flow_spec_action_drop),
- };
- parser.queue[HASH_RXQ_ETH].ibv_attr->num_of_specs++;
- }
- TAILQ_FOREACH(flow, &priv->flows, next) {
- struct ibv_flow_attr *attr;
- struct ibv_spec_header *attr_h;
- void *spec;
- struct ibv_flow_attr *flow_attr;
- struct ibv_spec_header *flow_h;
- void *flow_spec;
- unsigned int specs_n;
- unsigned int queue_id = parser.drop ? HASH_RXQ_ETH :
- parser.layer;
-
- attr = parser.queue[queue_id].ibv_attr;
- flow_attr = flow->frxq[queue_id].ibv_attr;
- /* Compare first the attributes. */
- if (!flow_attr ||
- memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr)))
- continue;
- if (attr->num_of_specs == 0)
- continue;
- spec = (void *)((uintptr_t)attr +
- sizeof(struct ibv_flow_attr));
- flow_spec = (void *)((uintptr_t)flow_attr +
- sizeof(struct ibv_flow_attr));
- specs_n = RTE_MIN(attr->num_of_specs, flow_attr->num_of_specs);
- for (i = 0; i != specs_n; ++i) {
- attr_h = spec;
- flow_h = flow_spec;
- if (memcmp(spec, flow_spec,
- RTE_MIN(attr_h->size, flow_h->size)))
- goto wrong_flow;
- spec = (void *)((uintptr_t)spec + attr_h->size);
- flow_spec = (void *)((uintptr_t)flow_spec +
- flow_h->size);
- }
- /* At this point, the flow match. */
- break;
-wrong_flow:
- /* The flow does not match. */
- continue;
- }
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (flow)
- mlx5_flow_list_destroy(dev, &priv->flows, flow);
-exit:
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (parser.queue[i].ibv_attr)
- rte_free(parser.queue[i].ibv_attr);
- }
- rte_errno = ret; /* Restore rte_errno. */
+ rte_errno = ENOTSUP;
return -rte_errno;
}
@@ -3725,56 +3846,3 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
}
return 0;
}
-
-/**
- * Detect number of Verbs flow priorities supported.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * number of supported Verbs flow priority.
- */
-unsigned int
-mlx5_get_max_verbs_prio(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- unsigned int verb_priorities = MLX5_VERBS_FLOW_PRIO_8;
- struct {
- struct ibv_flow_attr attr;
- struct ibv_flow_spec_eth eth;
- struct ibv_flow_spec_action_drop drop;
- } flow_attr = {
- .attr = {
- .num_of_specs = 2,
- },
- .eth = {
- .type = IBV_FLOW_SPEC_ETH,
- .size = sizeof(struct ibv_flow_spec_eth),
- },
- .drop = {
- .size = sizeof(struct ibv_flow_spec_action_drop),
- .type = IBV_FLOW_SPEC_ACTION_DROP,
- },
- };
- struct ibv_flow *flow;
-
- do {
- flow_attr.attr.priority = verb_priorities - 1;
- flow = mlx5_glue->create_flow(priv->flow_drop_queue->qp,
- &flow_attr.attr);
- if (flow) {
- claim_zero(mlx5_glue->destroy_flow(flow));
- /* Try more priorities. */
- verb_priorities *= 2;
- } else {
- /* Failed, restore last right number. */
- verb_priorities /= 2;
- break;
- }
- } while (1);
- DRV_LOG(DEBUG, "port %u Verbs flow priorities: %d,"
- " user flow priorities: %d",
- dev->data->port_id, verb_priorities, MLX5_CTRL_FLOW_PRIORITY);
- return verb_priorities;
-}
diff --git a/drivers/net/mlx5/mlx5_glue.c b/drivers/net/mlx5/mlx5_glue.c
index c7965e51..84f9492a 100644
--- a/drivers/net/mlx5/mlx5_glue.c
+++ b/drivers/net/mlx5/mlx5_glue.c
@@ -4,6 +4,7 @@
*/
#include <errno.h>
+#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
@@ -23,6 +24,8 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
+#include <rte_config.h>
+
#include "mlx5_autoconf.h"
#include "mlx5_glue.h"
@@ -343,6 +346,7 @@ mlx5_glue_dv_create_qp(struct ibv_context *context,
#endif
}
+alignas(RTE_CACHE_LINE_SIZE)
const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
.version = MLX5_GLUE_VERSION,
.fork_init = mlx5_glue_fork_init,
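
Side note on the alignas addition above: alignas() binds to the object being declared, so the exported glue pointer is pinned to a cache-line boundary, presumably to keep this hot dispatch handle from straddling two lines. A minimal sketch of the idiom, with hypothetical names standing in for the real ones:

#include <stdalign.h>

#define CACHE_LINE 64 /* hypothetical stand-in for RTE_CACHE_LINE_SIZE */

struct ops {
	int (*open)(void);
};

/* The alignment specifier applies to the declared object itself. */
alignas(CACHE_LINE)
static const struct ops ops_table = {
	.open = NULL,
};
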
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 672a4761..12ee37f5 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -49,7 +49,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])
struct ifreq request;
int ret;
- ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request);
+ ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request, 0);
if (ret)
return ret;
memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 08105a44..1d1bcb5f 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -198,9 +198,8 @@ mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
0, socket);
if (bt->table == NULL) {
rte_errno = ENOMEM;
- DRV_LOG(ERR,
- "failed to allocate memory for btree cache on socket %d",
- socket);
+ DEBUG("failed to allocate memory for btree cache on socket %d",
+ socket);
return -rte_errno;
}
bt->size = n;
@@ -208,8 +207,8 @@ mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
(*bt->table)[bt->len++] = (struct mlx5_mr_cache) {
.lkey = UINT32_MAX,
};
- DRV_LOG(DEBUG, "initialized B-tree %p with table %p",
- (void *)bt, (void *)bt->table);
+ DEBUG("initialized B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
return 0;
}
@@ -224,8 +223,8 @@ mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
{
if (bt == NULL)
return;
- DRV_LOG(DEBUG, "freeing B-tree %p with table %p",
- (void *)bt, (void *)bt->table);
+ DEBUG("freeing B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
rte_free(bt->table);
memset(bt, 0, sizeof(*bt));
}
@@ -236,9 +235,10 @@ mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
* @param bt
* Pointer to B-tree structure.
*/
-static void
-mlx5_mr_btree_dump(struct mlx5_mr_btree *bt)
+void
+mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
{
+#ifndef NDEBUG
int idx;
struct mlx5_mr_cache *lkp_tbl;
@@ -248,11 +248,11 @@ mlx5_mr_btree_dump(struct mlx5_mr_btree *bt)
for (idx = 0; idx < bt->len; ++idx) {
struct mlx5_mr_cache *entry = &lkp_tbl[idx];
- DRV_LOG(DEBUG,
- "B-tree(%p)[%u],"
- " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
- (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ DEBUG("B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
}
+#endif
}
/**
@@ -576,11 +576,10 @@ alloc_resources:
assert(msl->page_sz == ms->hugepage_sz);
/* Number of memsegs in the range. */
ms_n = len / msl->page_sz;
- DRV_LOG(DEBUG,
- "port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
- " page_sz=0x%" PRIx64 ", ms_n=%u",
- dev->data->port_id, (void *)addr,
- data.start, data.end, msl->page_sz, ms_n);
+ DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " page_sz=0x%" PRIx64 ", ms_n=%u",
+ dev->data->port_id, (void *)addr,
+ data.start, data.end, msl->page_sz, ms_n);
/* Size of memory for bitmap. */
bmp_size = rte_bitmap_get_memory_footprint(ms_n);
mr = rte_zmalloc_socket(NULL,
@@ -589,10 +588,9 @@ alloc_resources:
bmp_size,
RTE_CACHE_LINE_SIZE, msl->socket_id);
if (mr == NULL) {
- DRV_LOG(WARNING,
- "port %u unable to allocate memory for a new MR of"
- " address (%p).",
- dev->data->port_id, (void *)addr);
+ DEBUG("port %u unable to allocate memory for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
rte_errno = ENOMEM;
goto err_nolock;
}
@@ -606,10 +604,9 @@ alloc_resources:
bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
if (mr->ms_bmp == NULL) {
- DRV_LOG(WARNING,
- "port %u unable to initialize bitamp for a new MR of"
- " address (%p).",
- dev->data->port_id, (void *)addr);
+		DEBUG("port %u unable to initialize bitmap for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
rte_errno = EINVAL;
goto err_nolock;
}
@@ -625,11 +622,10 @@ alloc_resources:
data_re = data;
if (len > msl->page_sz &&
!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
- DRV_LOG(WARNING,
- "port %u unable to find virtually contiguous"
- " chunk for address (%p)."
- " rte_memseg_contig_walk() failed.",
- dev->data->port_id, (void *)addr);
+ DEBUG("port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
rte_errno = ENXIO;
goto err_memlock;
}
@@ -657,9 +653,8 @@ alloc_resources:
* here again.
*/
mr_btree_insert(&priv->mr.cache, entry);
- DRV_LOG(DEBUG,
- "port %u found MR for %p on final lookup, abort",
- dev->data->port_id, (void *)addr);
+ DEBUG("port %u found MR for %p on final lookup, abort",
+ dev->data->port_id, (void *)addr);
rte_rwlock_write_unlock(&priv->mr.rwlock);
rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
/*
@@ -707,22 +702,20 @@ alloc_resources:
mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
IBV_ACCESS_LOCAL_WRITE);
if (mr->ibv_mr == NULL) {
- DRV_LOG(WARNING,
- "port %u fail to create a verbs MR for address (%p)",
- dev->data->port_id, (void *)addr);
+ DEBUG("port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
rte_errno = EINVAL;
goto err_mrlock;
}
assert((uintptr_t)mr->ibv_mr->addr == data.start);
assert(mr->ibv_mr->length == len);
LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
- DRV_LOG(DEBUG,
- "port %u MR CREATED (%p) for %p:\n"
- " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
- " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
- dev->data->port_id, (void *)mr, (void *)addr,
- data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
- mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ DEBUG("port %u MR CREATED (%p) for %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
/* Insert to the global cache table. */
mr_insert_dev_cache(dev, mr);
/* Fill in output data. */
@@ -797,8 +790,8 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
int i;
int rebuild = 0;
- DRV_LOG(DEBUG, "port %u free callback: addr=%p, len=%zu",
- dev->data->port_id, addr, len);
+ DEBUG("port %u free callback: addr=%p, len=%zu",
+ dev->data->port_id, addr, len);
msl = rte_mem_virt2memseg_list(addr);
/* addr and len must be page-aligned. */
assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
@@ -825,14 +818,14 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
pos = ms_idx - mr->ms_base_idx;
assert(rte_bitmap_get(mr->ms_bmp, pos));
assert(pos < mr->ms_bmp_n);
- DRV_LOG(DEBUG, "port %u MR(%p): clear bitmap[%u] for addr %p",
- dev->data->port_id, (void *)mr, pos, (void *)start);
+ DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
+ dev->data->port_id, (void *)mr, pos, (void *)start);
rte_bitmap_clear(mr->ms_bmp, pos);
if (--mr->ms_n == 0) {
LIST_REMOVE(mr, mr);
LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
- DRV_LOG(DEBUG, "port %u remove MR(%p) from list",
- dev->data->port_id, (void *)mr);
+ DEBUG("port %u remove MR(%p) from list",
+ dev->data->port_id, (void *)mr);
}
/*
* MR is fragmented or will be freed. the global cache must be
@@ -852,13 +845,11 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
* before the core sees the newly allocated memory.
*/
++priv->mr.dev_gen;
- DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
- priv->mr.dev_gen);
+ DEBUG("broadcasting local cache flush, gen=%d",
+ priv->mr.dev_gen);
rte_smp_wmb();
}
rte_rwlock_write_unlock(&priv->mr.rwlock);
- if (rebuild && rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
- mlx5_mr_dump_dev(dev);
}
/**
@@ -1123,8 +1114,9 @@ mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
* Pointer to Ethernet device.
*/
void
-mlx5_mr_dump_dev(struct rte_eth_dev *dev)
+mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
{
+#ifndef NDEBUG
struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
int mr_n = 0;
@@ -1135,11 +1127,10 @@ mlx5_mr_dump_dev(struct rte_eth_dev *dev)
LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
unsigned int n;
- DRV_LOG(DEBUG,
- "port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
- dev->data->port_id, mr_n++,
- rte_cpu_to_be_32(mr->ibv_mr->lkey),
- mr->ms_n, mr->ms_bmp_n);
+ DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+ dev->data->port_id, mr_n++,
+ rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_n, mr->ms_bmp_n);
if (mr->ms_n == 0)
continue;
for (n = 0; n < mr->ms_bmp_n; ) {
@@ -1148,14 +1139,14 @@ mlx5_mr_dump_dev(struct rte_eth_dev *dev)
n = mr_find_next_chunk(mr, &ret, n);
if (!ret.end)
break;
- DRV_LOG(DEBUG,
- " chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
- chunk_n++, ret.start, ret.end);
+ DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
+ chunk_n++, ret.start, ret.end);
}
}
- DRV_LOG(DEBUG, "port %u dumping global cache", dev->data->port_id);
+ DEBUG("port %u dumping global cache", dev->data->port_id);
mlx5_mr_btree_dump(&priv->mr.cache);
rte_rwlock_read_unlock(&priv->mr.rwlock);
+#endif
}
/**
diff --git a/drivers/net/mlx5/mlx5_mr.h b/drivers/net/mlx5/mlx5_mr.h
index e0b28215..a57003fe 100644
--- a/drivers/net/mlx5/mlx5_mr.h
+++ b/drivers/net/mlx5/mlx5_mr.h
@@ -74,9 +74,12 @@ void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
size_t len, void *arg);
int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
struct rte_mempool *mp);
-void mlx5_mr_dump_dev(struct rte_eth_dev *dev);
void mlx5_mr_release(struct rte_eth_dev *dev);
+/* Debug purpose functions. */
+void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt);
+void mlx5_mr_dump_dev(struct rte_eth_dev *dev);
+
/**
* Look up LKey from given lookup table by linear search. Firstly look up the
* last-hit entry. If miss, the entire array is searched. If found, update the
diff --git a/drivers/net/mlx5/mlx5_nl.c b/drivers/net/mlx5/mlx5_nl.c
index dca85835..d61826ae 100644
--- a/drivers/net/mlx5/mlx5_nl.c
+++ b/drivers/net/mlx5/mlx5_nl.c
@@ -3,10 +3,21 @@
* Copyright 2018 Mellanox Technologies, Ltd
*/
+#include <errno.h>
+#include <linux/if_link.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
+#include <net/if.h>
+#include <rdma/rdma_netlink.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
#include <unistd.h>
+#include <rte_errno.h>
+
#include "mlx5.h"
#include "mlx5_utils.h"
@@ -27,6 +38,40 @@
((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg))))
#endif
+/*
+ * The following definitions are normally found in rdma/rdma_netlink.h;
+ * however, they are so recent that most systems do not expose them yet.
+ */
+#ifndef HAVE_RDMA_NL_NLDEV
+#define RDMA_NL_NLDEV 5
+#endif
+#ifndef HAVE_RDMA_NLDEV_CMD_GET
+#define RDMA_NLDEV_CMD_GET 1
+#endif
+#ifndef HAVE_RDMA_NLDEV_CMD_PORT_GET
+#define RDMA_NLDEV_CMD_PORT_GET 5
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_INDEX
+#define RDMA_NLDEV_ATTR_DEV_INDEX 1
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_NAME
+#define RDMA_NLDEV_ATTR_DEV_NAME 2
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_PORT_INDEX
+#define RDMA_NLDEV_ATTR_PORT_INDEX 3
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX
+#define RDMA_NLDEV_ATTR_NDEV_INDEX 50
+#endif
+
+/* These are normally found in linux/if_link.h. */
+#ifndef HAVE_IFLA_PHYS_SWITCH_ID
+#define IFLA_PHYS_SWITCH_ID 36
+#endif
+#ifndef HAVE_IFLA_PHYS_PORT_NAME
+#define IFLA_PHYS_PORT_NAME 38
+#endif
+
/* Add/remove MAC address through Netlink */
struct mlx5_nl_mac_addr {
struct ether_addr (*mac)[];
@@ -34,29 +79,35 @@ struct mlx5_nl_mac_addr {
int mac_n; /**< Number of addresses in the array. */
};
+/** Data structure used by mlx5_nl_ifindex_cb(). */
+struct mlx5_nl_ifindex_data {
+ const char *name; /**< IB device name (in). */
+ uint32_t ibindex; /**< IB device index (out). */
+ uint32_t ifindex; /**< Network interface index (out). */
+};
+
/**
* Opens a Netlink socket.
*
- * @param nl_groups
- * Netlink group value (e.g. RTMGRP_LINK).
+ * @param protocol
+ * Netlink protocol (e.g. NETLINK_ROUTE, NETLINK_RDMA).
*
* @return
* A file descriptor on success, a negative errno value otherwise and
* rte_errno is set.
*/
int
-mlx5_nl_init(uint32_t nl_groups)
+mlx5_nl_init(int protocol)
{
int fd;
int sndbuf_size = MLX5_SEND_BUF_SIZE;
int rcvbuf_size = MLX5_RECV_BUF_SIZE;
struct sockaddr_nl local = {
.nl_family = AF_NETLINK,
- .nl_groups = nl_groups,
};
int ret;
- fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
+ fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, protocol);
if (fd == -1) {
rte_errno = errno;
return -rte_errno;
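
With the protocol now caller-selected, one socket is presumably opened per Netlink family the driver talks to. A hedged usage sketch (variable names are illustrative; the diff itself stores the ROUTE socket as priv->nl_socket_route):

int nl_route = mlx5_nl_init(NETLINK_ROUTE); /* MAC, flags, link queries */
int nl_rdma = mlx5_nl_init(NETLINK_RDMA);   /* IB device enumeration */

if (nl_route < 0 || nl_rdma < 0) {
	/* rte_errno has been set by mlx5_nl_init(). */
}
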
@@ -311,7 +362,7 @@ mlx5_nl_mac_addr_list(struct rte_eth_dev *dev, struct ether_addr (*mac)[],
int *mac_n)
{
struct priv *priv = dev->data->dev_private;
- int iface_idx = mlx5_ifindex(dev);
+ unsigned int iface_idx = mlx5_ifindex(dev);
struct {
struct nlmsghdr hdr;
struct ifinfomsg ifm;
@@ -334,9 +385,9 @@ mlx5_nl_mac_addr_list(struct rte_eth_dev *dev, struct ether_addr (*mac)[],
int ret;
uint32_t sn = priv->nl_sn++;
- if (priv->nl_socket == -1)
+ if (priv->nl_socket_route == -1)
return 0;
- fd = priv->nl_socket;
+ fd = priv->nl_socket_route;
ret = mlx5_nl_request(fd, &req.hdr, sn, &req.ifm,
sizeof(struct ifinfomsg));
if (ret < 0)
@@ -370,7 +421,7 @@ mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct ether_addr *mac,
int add)
{
struct priv *priv = dev->data->dev_private;
- int iface_idx = mlx5_ifindex(dev);
+ unsigned int iface_idx = mlx5_ifindex(dev);
struct {
struct nlmsghdr hdr;
struct ndmsg ndm;
@@ -398,9 +449,9 @@ mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct ether_addr *mac,
int ret;
uint32_t sn = priv->nl_sn++;
- if (priv->nl_socket == -1)
+ if (priv->nl_socket_route == -1)
return 0;
- fd = priv->nl_socket;
+ fd = priv->nl_socket_route;
memcpy(RTA_DATA(&req.rta), mac, ETHER_ADDR_LEN);
req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) +
RTA_ALIGN(req.rta.rta_len);
@@ -549,7 +600,7 @@ static int
mlx5_nl_device_flags(struct rte_eth_dev *dev, uint32_t flags, int enable)
{
struct priv *priv = dev->data->dev_private;
- int iface_idx = mlx5_ifindex(dev);
+ unsigned int iface_idx = mlx5_ifindex(dev);
struct {
struct nlmsghdr hdr;
struct ifinfomsg ifi;
@@ -569,9 +620,9 @@ mlx5_nl_device_flags(struct rte_eth_dev *dev, uint32_t flags, int enable)
int ret;
assert(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI)));
- if (priv->nl_socket < 0)
+ if (priv->nl_socket_route < 0)
return 0;
- fd = priv->nl_socket;
+ fd = priv->nl_socket_route;
ret = mlx5_nl_send(fd, &req.hdr, priv->nl_sn++);
if (ret < 0)
return ret;
@@ -625,3 +676,241 @@ mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable)
strerror(rte_errno));
return ret;
}
+
+/**
+ * Process network interface information from Netlink message.
+ *
+ * @param nh
+ * Pointer to Netlink message header.
+ * @param arg
+ * Opaque data pointer for this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_ifindex_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_nl_ifindex_data *data = arg;
+ size_t off = NLMSG_HDRLEN;
+ uint32_t ibindex = 0;
+ uint32_t ifindex = 0;
+ int found = 0;
+
+ if (nh->nlmsg_type !=
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET) &&
+ nh->nlmsg_type !=
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_PORT_GET))
+ goto error;
+ while (off < nh->nlmsg_len) {
+ struct nlattr *na = (void *)((uintptr_t)nh + off);
+ void *payload = (void *)((uintptr_t)na + NLA_HDRLEN);
+
+ if (na->nla_len > nh->nlmsg_len - off)
+ goto error;
+ switch (na->nla_type) {
+ case RDMA_NLDEV_ATTR_DEV_INDEX:
+ ibindex = *(uint32_t *)payload;
+ break;
+ case RDMA_NLDEV_ATTR_DEV_NAME:
+ if (!strcmp(payload, data->name))
+ found = 1;
+ break;
+ case RDMA_NLDEV_ATTR_NDEV_INDEX:
+ ifindex = *(uint32_t *)payload;
+ break;
+ default:
+ break;
+ }
+ off += NLA_ALIGN(na->nla_len);
+ }
+ if (found) {
+ data->ibindex = ibindex;
+ data->ifindex = ifindex;
+ }
+ return 0;
+error:
+ rte_errno = EINVAL;
+ return -rte_errno;
+}
+
+/**
+ * Get index of network interface associated with some IB device.
+ *
+ * This is the only somewhat safe method to avoid resorting to heuristics
+ * when faced with port representors. Unfortunately it requires at least
+ * Linux 4.17.
+ *
+ * @param nl
+ * Netlink socket of the RDMA kind (NETLINK_RDMA).
+ * @param[in] name
+ * IB device name.
+ *
+ * @return
+ * A valid (nonzero) interface index on success, 0 otherwise and rte_errno
+ * is set.
+ */
+unsigned int
+mlx5_nl_ifindex(int nl, const char *name)
+{
+ static const uint32_t pindex = 1;
+ uint32_t seq = random();
+ struct mlx5_nl_ifindex_data data = {
+ .name = name,
+ .ibindex = 0, /* Determined during first pass. */
+ .ifindex = 0, /* Determined during second pass. */
+ };
+ union {
+ struct nlmsghdr nh;
+ uint8_t buf[NLMSG_HDRLEN +
+ NLA_HDRLEN + NLA_ALIGN(sizeof(data.ibindex)) +
+ NLA_HDRLEN + NLA_ALIGN(sizeof(pindex))];
+ } req = {
+ .nh = {
+ .nlmsg_len = NLMSG_LENGTH(0),
+ .nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_GET),
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP,
+ },
+ };
+ struct nlattr *na;
+ int ret;
+
+ ret = mlx5_nl_send(nl, &req.nh, seq);
+ if (ret < 0)
+ return 0;
+ ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data);
+ if (ret < 0)
+ return 0;
+ if (!data.ibindex)
+ goto error;
+ ++seq;
+ req.nh.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_PORT_GET);
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.buf) - NLMSG_HDRLEN);
+ na = (void *)((uintptr_t)req.buf + NLMSG_HDRLEN);
+ na->nla_len = NLA_HDRLEN + sizeof(data.ibindex);
+ na->nla_type = RDMA_NLDEV_ATTR_DEV_INDEX;
+ memcpy((void *)((uintptr_t)na + NLA_HDRLEN),
+ &data.ibindex, sizeof(data.ibindex));
+ na = (void *)((uintptr_t)na + NLA_ALIGN(na->nla_len));
+ na->nla_len = NLA_HDRLEN + sizeof(pindex);
+ na->nla_type = RDMA_NLDEV_ATTR_PORT_INDEX;
+ memcpy((void *)((uintptr_t)na + NLA_HDRLEN),
+ &pindex, sizeof(pindex));
+ ret = mlx5_nl_send(nl, &req.nh, seq);
+ if (ret < 0)
+ return 0;
+ ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data);
+ if (ret < 0)
+ return 0;
+ if (!data.ifindex)
+ goto error;
+ return data.ifindex;
+error:
+ rte_errno = ENODEV;
+ return 0;
+}
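
In short, the lookup is a two-pass RDMA Netlink exchange: a dumped RDMA_NLDEV_CMD_GET resolves the IB device name to its device index, then a targeted RDMA_NLDEV_CMD_PORT_GET on port 1 returns the matching network interface index. A hedged usage sketch ("mlx5_0" is a made-up device name):

int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
unsigned int ifindex = 0;

if (nl_rdma >= 0)
	ifindex = mlx5_nl_ifindex(nl_rdma, "mlx5_0");
if (!ifindex) {
	/* rte_errno is ENODEV or the underlying send/recv error. */
}
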
+
+/**
+ * Process switch information from Netlink message.
+ *
+ * @param nh
+ * Pointer to Netlink message header.
+ * @param arg
+ * Opaque data pointer for this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_switch_info_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_switch_info info = {
+ .master = 0,
+ .representor = 0,
+ .port_name = 0,
+ .switch_id = 0,
+ };
+ size_t off = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ bool port_name_set = false;
+ bool switch_id_set = false;
+
+ if (nh->nlmsg_type != RTM_NEWLINK)
+ goto error;
+ while (off < nh->nlmsg_len) {
+ struct rtattr *ra = (void *)((uintptr_t)nh + off);
+ void *payload = RTA_DATA(ra);
+ char *end;
+ unsigned int i;
+
+ if (ra->rta_len > nh->nlmsg_len - off)
+ goto error;
+ switch (ra->rta_type) {
+ case IFLA_PHYS_PORT_NAME:
+ errno = 0;
+ info.port_name = strtol(payload, &end, 0);
+ if (errno ||
+ (size_t)(end - (char *)payload) != strlen(payload))
+ goto error;
+ port_name_set = true;
+ break;
+ case IFLA_PHYS_SWITCH_ID:
+ info.switch_id = 0;
+ for (i = 0; i < RTA_PAYLOAD(ra); ++i) {
+ info.switch_id <<= 8;
+ info.switch_id |= ((uint8_t *)payload)[i];
+ }
+ switch_id_set = true;
+ break;
+ }
+ off += RTA_ALIGN(ra->rta_len);
+ }
+ info.master = switch_id_set && !port_name_set;
+ info.representor = switch_id_set && port_name_set;
+ memcpy(arg, &info, sizeof(info));
+ return 0;
+error:
+ rte_errno = EINVAL;
+ return -rte_errno;
+}
+
+/**
+ * Get switch information associated with network interface.
+ *
+ * @param nl
+ * Netlink socket of the ROUTE kind (NETLINK_ROUTE).
+ * @param ifindex
+ * Network interface index.
+ * @param[out] info
+ * Switch information object, populated in case of success.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_switch_info(int nl, unsigned int ifindex, struct mlx5_switch_info *info)
+{
+ uint32_t seq = random();
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ } req = {
+ .nh = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(req.info)),
+ .nlmsg_type = RTM_GETLINK,
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
+ },
+ .info = {
+ .ifi_family = AF_UNSPEC,
+ .ifi_index = ifindex,
+ },
+ };
+ int ret;
+
+ ret = mlx5_nl_send(nl, &req.nh, seq);
+ if (ret >= 0)
+ ret = mlx5_nl_recv(nl, seq, mlx5_nl_switch_info_cb, info);
+ return ret;
+}
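
A short sketch combining the helpers above to classify a port, assuming nl_route came from mlx5_nl_init(NETLINK_ROUTE) and ifindex from mlx5_nl_ifindex():

struct mlx5_switch_info info;

if (mlx5_nl_switch_info(nl_route, ifindex, &info) == 0) {
	if (info.representor) {
		/* Representor: port_name holds its number. */
	} else if (info.master) {
		/* Master (uplink): switch_id set, no port name. */
	}
}
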
diff --git a/drivers/net/mlx5/mlx5_nl_flow.c b/drivers/net/mlx5/mlx5_nl_flow.c
new file mode 100644
index 00000000..a1c8c340
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_nl_flow.c
@@ -0,0 +1,1248 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <libmnl/libmnl.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/pkt_cls.h>
+#include <linux/pkt_sched.h>
+#include <linux/rtnetlink.h>
+#include <linux/tc_act/tc_gact.h>
+#include <linux/tc_act/tc_mirred.h>
+#include <netinet/in.h>
+#include <stdalign.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+
+#include "mlx5.h"
+#include "mlx5_autoconf.h"
+
+#ifdef HAVE_TC_ACT_VLAN
+
+#include <linux/tc_act/tc_vlan.h>
+
+#else /* HAVE_TC_ACT_VLAN */
+
+#define TCA_VLAN_ACT_POP 1
+#define TCA_VLAN_ACT_PUSH 2
+#define TCA_VLAN_ACT_MODIFY 3
+#define TCA_VLAN_PARMS 2
+#define TCA_VLAN_PUSH_VLAN_ID 3
+#define TCA_VLAN_PUSH_VLAN_PROTOCOL 4
+#define TCA_VLAN_PAD 5
+#define TCA_VLAN_PUSH_VLAN_PRIORITY 6
+
+struct tc_vlan {
+ tc_gen;
+ int v_action;
+};
+
+#endif /* HAVE_TC_ACT_VLAN */
+
+/* Normally found in linux/netlink.h. */
+#ifndef NETLINK_CAP_ACK
+#define NETLINK_CAP_ACK 10
+#endif
+
+/* Normally found in linux/pkt_sched.h. */
+#ifndef TC_H_MIN_INGRESS
+#define TC_H_MIN_INGRESS 0xfff2u
+#endif
+
+/* Normally found in linux/pkt_cls.h. */
+#ifndef TCA_CLS_FLAGS_SKIP_SW
+#define TCA_CLS_FLAGS_SKIP_SW (1 << 1)
+#endif
+#ifndef HAVE_TCA_FLOWER_ACT
+#define TCA_FLOWER_ACT 3
+#endif
+#ifndef HAVE_TCA_FLOWER_FLAGS
+#define TCA_FLOWER_FLAGS 22
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE
+#define TCA_FLOWER_KEY_ETH_TYPE 8
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST
+#define TCA_FLOWER_KEY_ETH_DST 4
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK
+#define TCA_FLOWER_KEY_ETH_DST_MASK 5
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC
+#define TCA_FLOWER_KEY_ETH_SRC 6
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK
+#define TCA_FLOWER_KEY_ETH_SRC_MASK 7
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO
+#define TCA_FLOWER_KEY_IP_PROTO 9
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC
+#define TCA_FLOWER_KEY_IPV4_SRC 10
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK
+#define TCA_FLOWER_KEY_IPV4_SRC_MASK 11
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST
+#define TCA_FLOWER_KEY_IPV4_DST 12
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK
+#define TCA_FLOWER_KEY_IPV4_DST_MASK 13
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC
+#define TCA_FLOWER_KEY_IPV6_SRC 14
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK
+#define TCA_FLOWER_KEY_IPV6_SRC_MASK 15
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST
+#define TCA_FLOWER_KEY_IPV6_DST 16
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK
+#define TCA_FLOWER_KEY_IPV6_DST_MASK 17
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC
+#define TCA_FLOWER_KEY_TCP_SRC 18
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK
+#define TCA_FLOWER_KEY_TCP_SRC_MASK 35
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST
+#define TCA_FLOWER_KEY_TCP_DST 19
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK
+#define TCA_FLOWER_KEY_TCP_DST_MASK 36
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC
+#define TCA_FLOWER_KEY_UDP_SRC 20
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK
+#define TCA_FLOWER_KEY_UDP_SRC_MASK 37
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST
+#define TCA_FLOWER_KEY_UDP_DST 21
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK
+#define TCA_FLOWER_KEY_UDP_DST_MASK 38
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID
+#define TCA_FLOWER_KEY_VLAN_ID 23
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO
+#define TCA_FLOWER_KEY_VLAN_PRIO 24
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE
+#define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25
+#endif
+
+/** Parser state definitions for mlx5_nl_flow_trans[]. */
+enum mlx5_nl_flow_trans {
+ INVALID,
+ BACK,
+ ATTR,
+ PATTERN,
+ ITEM_VOID,
+ ITEM_PORT_ID,
+ ITEM_ETH,
+ ITEM_VLAN,
+ ITEM_IPV4,
+ ITEM_IPV6,
+ ITEM_TCP,
+ ITEM_UDP,
+ ACTIONS,
+ ACTION_VOID,
+ ACTION_PORT_ID,
+ ACTION_DROP,
+ ACTION_OF_POP_VLAN,
+ ACTION_OF_PUSH_VLAN,
+ ACTION_OF_SET_VLAN_VID,
+ ACTION_OF_SET_VLAN_PCP,
+ END,
+};
+
+#define TRANS(...) (const enum mlx5_nl_flow_trans []){ __VA_ARGS__, INVALID, }
+
+#define PATTERN_COMMON \
+ ITEM_VOID, ITEM_PORT_ID, ACTIONS
+#define ACTIONS_COMMON \
+ ACTION_VOID, ACTION_OF_POP_VLAN, ACTION_OF_PUSH_VLAN, \
+ ACTION_OF_SET_VLAN_VID, ACTION_OF_SET_VLAN_PCP
+#define ACTIONS_FATE \
+ ACTION_PORT_ID, ACTION_DROP
+
+/** Parser state transitions used by mlx5_nl_flow_transpose(). */
+static const enum mlx5_nl_flow_trans *const mlx5_nl_flow_trans[] = {
+ [INVALID] = NULL,
+ [BACK] = NULL,
+ [ATTR] = TRANS(PATTERN),
+ [PATTERN] = TRANS(ITEM_ETH, PATTERN_COMMON),
+ [ITEM_VOID] = TRANS(BACK),
+ [ITEM_PORT_ID] = TRANS(BACK),
+ [ITEM_ETH] = TRANS(ITEM_IPV4, ITEM_IPV6, ITEM_VLAN, PATTERN_COMMON),
+ [ITEM_VLAN] = TRANS(ITEM_IPV4, ITEM_IPV6, PATTERN_COMMON),
+ [ITEM_IPV4] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON),
+ [ITEM_IPV6] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON),
+ [ITEM_TCP] = TRANS(PATTERN_COMMON),
+ [ITEM_UDP] = TRANS(PATTERN_COMMON),
+ [ACTIONS] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_VOID] = TRANS(BACK),
+ [ACTION_PORT_ID] = TRANS(ACTION_VOID, END),
+ [ACTION_DROP] = TRANS(ACTION_VOID, END),
+ [ACTION_OF_POP_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_OF_PUSH_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_OF_SET_VLAN_VID] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_OF_SET_VLAN_PCP] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [END] = NULL,
+};
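
Each TRANS() list is a compound literal terminated by INVALID, which is what lets the parser scan a state's candidates until one accepts the current item or action. For instance, the ITEM_IPV4 row above expands roughly to:

(const enum mlx5_nl_flow_trans []){
	ITEM_TCP, ITEM_UDP,
	ITEM_VOID, ITEM_PORT_ID, ACTIONS, /* PATTERN_COMMON */
	INVALID,
}
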
+
+/** Empty masks for known item types. */
+static const union {
+ struct rte_flow_item_port_id port_id;
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_tcp tcp;
+ struct rte_flow_item_udp udp;
+} mlx5_nl_flow_mask_empty;
+
+/** Supported masks for known item types. */
+static const struct {
+ struct rte_flow_item_port_id port_id;
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_tcp tcp;
+ struct rte_flow_item_udp udp;
+} mlx5_nl_flow_mask_supported = {
+ .port_id = {
+ .id = 0xffffffff,
+ },
+ .eth = {
+ .type = RTE_BE16(0xffff),
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ .vlan = {
+ /* PCP and VID only, no DEI. */
+ .tci = RTE_BE16(0xefff),
+ .inner_type = RTE_BE16(0xffff),
+ },
+ .ipv4.hdr = {
+ .next_proto_id = 0xff,
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ },
+ .ipv6.hdr = {
+ .proto = 0xff,
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .tcp.hdr = {
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
+ },
+ .udp.hdr = {
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
+ },
+};
+
+/**
+ * Retrieve mask for pattern item.
+ *
+ * This function does basic sanity checks on a pattern item in order to
+ * return the most appropriate mask for it.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] mask_default
+ * Default mask for pattern item as specified by the flow API.
+ * @param[in] mask_supported
+ * Mask fields supported by the implementation.
+ * @param[in] mask_empty
+ * Empty mask to return when there is no specification.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * Either @p item->mask or one of the mask parameters on success, NULL
+ * otherwise and rte_errno is set.
+ */
+static const void *
+mlx5_nl_flow_item_mask(const struct rte_flow_item *item,
+ const void *mask_default,
+ const void *mask_supported,
+ const void *mask_empty,
+ size_t mask_size,
+ struct rte_flow_error *error)
+{
+ const uint8_t *mask;
+ size_t i;
+
+ /* item->last and item->mask cannot exist without item->spec. */
+ if (!item->spec && (item->mask || item->last)) {
+ rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "\"mask\" or \"last\" field provided without a"
+ " corresponding \"spec\"");
+ return NULL;
+ }
+ /* No spec, no mask, no problem. */
+ if (!item->spec)
+ return mask_empty;
+ mask = item->mask ? item->mask : mask_default;
+ assert(mask);
+ /*
+ * Single-pass check to make sure that:
+ * - Mask is supported, no bits are set outside mask_supported.
+ * - Both item->spec and item->last are included in mask.
+ */
+ for (i = 0; i != mask_size; ++i) {
+ if (!mask[i])
+ continue;
+ if ((mask[i] | ((const uint8_t *)mask_supported)[i]) !=
+ ((const uint8_t *)mask_supported)[i]) {
+ rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask, "unsupported field found in \"mask\"");
+ return NULL;
+ }
+ if (item->last &&
+ (((const uint8_t *)item->spec)[i] & mask[i]) !=
+ (((const uint8_t *)item->last)[i] & mask[i])) {
+ rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ item->last,
+ "range between \"spec\" and \"last\" not"
+ " comprised in \"mask\"");
+ return NULL;
+ }
+ }
+ return mask;
+}
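
Summarizing the three possible outcomes on a hypothetical Ethernet item:

/* item->spec == NULL              -> &mlx5_nl_flow_mask_empty.eth
 *                                    (match-all, nothing to emit);
 * item->spec set, item->mask NULL -> &rte_flow_item_eth_mask
 *                                    (the flow API default mask);
 * both set                        -> item->mask, once validated against
 *                                    mlx5_nl_flow_mask_supported.eth. */
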
+
+/**
+ * Transpose flow rule description to rtnetlink message.
+ *
+ * This function transposes a flow rule description to a traffic control
+ * (TC) filter creation message ready to be sent over Netlink.
+ *
+ * Target interface is specified as the first entry of the @p ptoi table.
+ * Subsequent entries enable this function to resolve other DPDK port IDs
+ * found in the flow rule.
+ *
+ * @param[out] buf
+ * Output message buffer. May be NULL when @p size is 0.
+ * @param size
+ * Size of @p buf. Message may be truncated if not large enough.
+ * @param[in] ptoi
+ * DPDK port ID to network interface index translation table. This table
+ * is terminated by an entry with a zero ifindex value.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification.
+ * @param[in] actions
+ * Associated actions.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A positive value representing the exact size of the message in bytes
+ * regardless of the @p size parameter on success, a negative errno value
+ * otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_transpose(void *buf,
+ size_t size,
+ const struct mlx5_nl_flow_ptoi *ptoi,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ alignas(struct nlmsghdr)
+ uint8_t buf_tmp[mnl_nlmsg_size(sizeof(struct tcmsg) + 1024)];
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *action;
+ unsigned int n;
+ uint32_t act_index_cur;
+ bool in_port_id_set;
+ bool eth_type_set;
+ bool vlan_present;
+ bool vlan_eth_type_set;
+ bool ip_proto_set;
+ struct nlattr *na_flower;
+ struct nlattr *na_flower_act;
+ struct nlattr *na_vlan_id;
+ struct nlattr *na_vlan_priority;
+ const enum mlx5_nl_flow_trans *trans;
+ const enum mlx5_nl_flow_trans *back;
+
+ if (!size)
+ goto error_nobufs;
+init:
+ item = pattern;
+ action = actions;
+ n = 0;
+ act_index_cur = 0;
+ in_port_id_set = false;
+ eth_type_set = false;
+ vlan_present = false;
+ vlan_eth_type_set = false;
+ ip_proto_set = false;
+ na_flower = NULL;
+ na_flower_act = NULL;
+ na_vlan_id = NULL;
+ na_vlan_priority = NULL;
+ trans = TRANS(ATTR);
+ back = trans;
+trans:
+ switch (trans[n++]) {
+ union {
+ const struct rte_flow_item_port_id *port_id;
+ const struct rte_flow_item_eth *eth;
+ const struct rte_flow_item_vlan *vlan;
+ const struct rte_flow_item_ipv4 *ipv4;
+ const struct rte_flow_item_ipv6 *ipv6;
+ const struct rte_flow_item_tcp *tcp;
+ const struct rte_flow_item_udp *udp;
+ } spec, mask;
+ union {
+ const struct rte_flow_action_port_id *port_id;
+ const struct rte_flow_action_of_push_vlan *of_push_vlan;
+ const struct rte_flow_action_of_set_vlan_vid *
+ of_set_vlan_vid;
+ const struct rte_flow_action_of_set_vlan_pcp *
+ of_set_vlan_pcp;
+ } conf;
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+ struct nlattr *act_index;
+ struct nlattr *act;
+ unsigned int i;
+
+ case INVALID:
+ if (item->type)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "unsupported pattern item combination");
+ else if (action->type)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "unsupported action combination");
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "flow rule lacks some kind of fate action");
+ case BACK:
+ trans = back;
+ n = 0;
+ goto trans;
+ case ATTR:
+ /*
+ * Supported attributes: no groups, some priorities and
+ * ingress only. Don't care about transfer as it is the
+ * caller's problem.
+ */
+ if (attr->group)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "groups are not supported");
+ if (attr->priority > 0xfffe)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "lowest priority level is 0xfffe");
+ if (!attr->ingress)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "only ingress is supported");
+ if (attr->egress)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "egress is not supported");
+ if (size < mnl_nlmsg_size(sizeof(*tcm)))
+ goto error_nobufs;
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = 0;
+ nlh->nlmsg_flags = 0;
+ nlh->nlmsg_seq = 0;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ptoi[0].ifindex;
+ /*
+ * Let kernel pick a handle by default. A predictable handle
+ * can be set by the caller on the resulting buffer through
+ * mlx5_nl_flow_brand().
+ */
+ tcm->tcm_handle = 0;
+ tcm->tcm_parent = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_INGRESS);
+ /*
+ * Priority cannot be zero to prevent the kernel from
+ * picking one automatically.
+ */
+ tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16,
+ RTE_BE16(ETH_P_ALL));
+ break;
+ case PATTERN:
+ if (!mnl_attr_put_strz_check(buf, size, TCA_KIND, "flower"))
+ goto error_nobufs;
+ na_flower = mnl_attr_nest_start_check(buf, size, TCA_OPTIONS);
+ if (!na_flower)
+ goto error_nobufs;
+ if (!mnl_attr_put_u32_check(buf, size, TCA_FLOWER_FLAGS,
+ TCA_CLS_FLAGS_SKIP_SW))
+ goto error_nobufs;
+ break;
+ case ITEM_VOID:
+ if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
+ goto trans;
+ ++item;
+ break;
+ case ITEM_PORT_ID:
+ if (item->type != RTE_FLOW_ITEM_TYPE_PORT_ID)
+ goto trans;
+ mask.port_id = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_port_id_mask,
+ &mlx5_nl_flow_mask_supported.port_id,
+ &mlx5_nl_flow_mask_empty.port_id,
+ sizeof(mlx5_nl_flow_mask_supported.port_id), error);
+ if (!mask.port_id)
+ return -rte_errno;
+ if (mask.port_id == &mlx5_nl_flow_mask_empty.port_id) {
+ in_port_id_set = 1;
+ ++item;
+ break;
+ }
+ spec.port_id = item->spec;
+ if (mask.port_id->id && mask.port_id->id != 0xffffffff)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.port_id,
+ "no support for partial mask on"
+ " \"id\" field");
+ if (!mask.port_id->id)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == spec.port_id->id)
+ break;
+ if (!ptoi[i].ifindex)
+ return rte_flow_error_set
+ (error, ENODEV, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ spec.port_id,
+ "missing data to convert port ID to ifindex");
+ tcm = mnl_nlmsg_get_payload(buf);
+ if (in_port_id_set &&
+ ptoi[i].ifindex != (unsigned int)tcm->tcm_ifindex)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ spec.port_id,
+ "cannot match traffic for several port IDs"
+ " through a single flow rule");
+ tcm->tcm_ifindex = ptoi[i].ifindex;
+ in_port_id_set = 1;
+ ++item;
+ break;
+ case ITEM_ETH:
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH)
+ goto trans;
+ mask.eth = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_eth_mask,
+ &mlx5_nl_flow_mask_supported.eth,
+ &mlx5_nl_flow_mask_empty.eth,
+ sizeof(mlx5_nl_flow_mask_supported.eth), error);
+ if (!mask.eth)
+ return -rte_errno;
+ if (mask.eth == &mlx5_nl_flow_mask_empty.eth) {
+ ++item;
+ break;
+ }
+ spec.eth = item->spec;
+ if (mask.eth->type && mask.eth->type != RTE_BE16(0xffff))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.eth,
+ "no support for partial mask on"
+ " \"type\" field");
+ if (mask.eth->type) {
+ if (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_ETH_TYPE,
+ spec.eth->type))
+ goto error_nobufs;
+ eth_type_set = 1;
+ }
+ if ((!is_zero_ether_addr(&mask.eth->dst) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_DST,
+ ETHER_ADDR_LEN,
+ spec.eth->dst.addr_bytes) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_DST_MASK,
+ ETHER_ADDR_LEN,
+ mask.eth->dst.addr_bytes))) ||
+ (!is_zero_ether_addr(&mask.eth->src) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_SRC,
+ ETHER_ADDR_LEN,
+ spec.eth->src.addr_bytes) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_SRC_MASK,
+ ETHER_ADDR_LEN,
+ mask.eth->src.addr_bytes))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_VLAN:
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN)
+ goto trans;
+ mask.vlan = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_vlan_mask,
+ &mlx5_nl_flow_mask_supported.vlan,
+ &mlx5_nl_flow_mask_empty.vlan,
+ sizeof(mlx5_nl_flow_mask_supported.vlan), error);
+ if (!mask.vlan)
+ return -rte_errno;
+ if (!eth_type_set &&
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_8021Q)))
+ goto error_nobufs;
+ eth_type_set = 1;
+ vlan_present = 1;
+ if (mask.vlan == &mlx5_nl_flow_mask_empty.vlan) {
+ ++item;
+ break;
+ }
+ spec.vlan = item->spec;
+ if ((mask.vlan->tci & RTE_BE16(0xe000) &&
+ (mask.vlan->tci & RTE_BE16(0xe000)) != RTE_BE16(0xe000)) ||
+ (mask.vlan->tci & RTE_BE16(0x0fff) &&
+ (mask.vlan->tci & RTE_BE16(0x0fff)) != RTE_BE16(0x0fff)) ||
+ (mask.vlan->inner_type &&
+ mask.vlan->inner_type != RTE_BE16(0xffff)))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.vlan,
+ "no support for partial masks on"
+ " \"tci\" (PCP and VID parts) and"
+ " \"inner_type\" fields");
+ if (mask.vlan->inner_type) {
+ if (!mnl_attr_put_u16_check
+ (buf, size, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ spec.vlan->inner_type))
+ goto error_nobufs;
+ vlan_eth_type_set = 1;
+ }
+ if ((mask.vlan->tci & RTE_BE16(0xe000) &&
+ !mnl_attr_put_u8_check
+ (buf, size, TCA_FLOWER_KEY_VLAN_PRIO,
+ (rte_be_to_cpu_16(spec.vlan->tci) >> 13) & 0x7)) ||
+ (mask.vlan->tci & RTE_BE16(0x0fff) &&
+ !mnl_attr_put_u16_check
+ (buf, size, TCA_FLOWER_KEY_VLAN_ID,
+ rte_be_to_cpu_16(spec.vlan->tci & RTE_BE16(0x0fff)))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_IPV4:
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4)
+ goto trans;
+ mask.ipv4 = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_ipv4_mask,
+ &mlx5_nl_flow_mask_supported.ipv4,
+ &mlx5_nl_flow_mask_empty.ipv4,
+ sizeof(mlx5_nl_flow_mask_supported.ipv4), error);
+ if (!mask.ipv4)
+ return -rte_errno;
+ if ((!eth_type_set || !vlan_eth_type_set) &&
+ !mnl_attr_put_u16_check(buf, size,
+ vlan_present ?
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE :
+ TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_IP)))
+ goto error_nobufs;
+ eth_type_set = 1;
+ vlan_eth_type_set = 1;
+ if (mask.ipv4 == &mlx5_nl_flow_mask_empty.ipv4) {
+ ++item;
+ break;
+ }
+ spec.ipv4 = item->spec;
+ if (mask.ipv4->hdr.next_proto_id &&
+ mask.ipv4->hdr.next_proto_id != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.ipv4,
+ "no support for partial mask on"
+ " \"hdr.next_proto_id\" field");
+ if (mask.ipv4->hdr.next_proto_id) {
+ if (!mnl_attr_put_u8_check
+ (buf, size, TCA_FLOWER_KEY_IP_PROTO,
+ spec.ipv4->hdr.next_proto_id))
+ goto error_nobufs;
+ ip_proto_set = 1;
+ }
+ if ((mask.ipv4->hdr.src_addr &&
+ (!mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_SRC,
+ spec.ipv4->hdr.src_addr) ||
+ !mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_SRC_MASK,
+ mask.ipv4->hdr.src_addr))) ||
+ (mask.ipv4->hdr.dst_addr &&
+ (!mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_DST,
+ spec.ipv4->hdr.dst_addr) ||
+ !mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_DST_MASK,
+ mask.ipv4->hdr.dst_addr))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_IPV6:
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV6)
+ goto trans;
+ mask.ipv6 = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_ipv6_mask,
+ &mlx5_nl_flow_mask_supported.ipv6,
+ &mlx5_nl_flow_mask_empty.ipv6,
+ sizeof(mlx5_nl_flow_mask_supported.ipv6), error);
+ if (!mask.ipv6)
+ return -rte_errno;
+ if ((!eth_type_set || !vlan_eth_type_set) &&
+ !mnl_attr_put_u16_check(buf, size,
+ vlan_present ?
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE :
+ TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_IPV6)))
+ goto error_nobufs;
+ eth_type_set = 1;
+ vlan_eth_type_set = 1;
+ if (mask.ipv6 == &mlx5_nl_flow_mask_empty.ipv6) {
+ ++item;
+ break;
+ }
+ spec.ipv6 = item->spec;
+ if (mask.ipv6->hdr.proto && mask.ipv6->hdr.proto != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.ipv6,
+ "no support for partial mask on"
+ " \"hdr.proto\" field");
+ if (mask.ipv6->hdr.proto) {
+ if (!mnl_attr_put_u8_check
+ (buf, size, TCA_FLOWER_KEY_IP_PROTO,
+ spec.ipv6->hdr.proto))
+ goto error_nobufs;
+ ip_proto_set = 1;
+ }
+ if ((!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.src_addr) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_SRC,
+ sizeof(spec.ipv6->hdr.src_addr),
+ spec.ipv6->hdr.src_addr) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_SRC_MASK,
+ sizeof(mask.ipv6->hdr.src_addr),
+ mask.ipv6->hdr.src_addr))) ||
+ (!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.dst_addr) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_DST,
+ sizeof(spec.ipv6->hdr.dst_addr),
+ spec.ipv6->hdr.dst_addr) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_DST_MASK,
+ sizeof(mask.ipv6->hdr.dst_addr),
+ mask.ipv6->hdr.dst_addr))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_TCP:
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP)
+ goto trans;
+ mask.tcp = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_tcp_mask,
+ &mlx5_nl_flow_mask_supported.tcp,
+ &mlx5_nl_flow_mask_empty.tcp,
+ sizeof(mlx5_nl_flow_mask_supported.tcp), error);
+ if (!mask.tcp)
+ return -rte_errno;
+ if (!ip_proto_set &&
+ !mnl_attr_put_u8_check(buf, size,
+ TCA_FLOWER_KEY_IP_PROTO,
+ IPPROTO_TCP))
+ goto error_nobufs;
+ if (mask.tcp == &mlx5_nl_flow_mask_empty.tcp) {
+ ++item;
+ break;
+ }
+ spec.tcp = item->spec;
+ if ((mask.tcp->hdr.src_port &&
+ mask.tcp->hdr.src_port != RTE_BE16(0xffff)) ||
+ (mask.tcp->hdr.dst_port &&
+ mask.tcp->hdr.dst_port != RTE_BE16(0xffff)))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.tcp,
+ "no support for partial masks on"
+ " \"hdr.src_port\" and \"hdr.dst_port\""
+ " fields");
+ if ((mask.tcp->hdr.src_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_SRC,
+ spec.tcp->hdr.src_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_SRC_MASK,
+ mask.tcp->hdr.src_port))) ||
+ (mask.tcp->hdr.dst_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_DST,
+ spec.tcp->hdr.dst_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_DST_MASK,
+ mask.tcp->hdr.dst_port))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_UDP:
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP)
+ goto trans;
+ mask.udp = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_udp_mask,
+ &mlx5_nl_flow_mask_supported.udp,
+ &mlx5_nl_flow_mask_empty.udp,
+ sizeof(mlx5_nl_flow_mask_supported.udp), error);
+ if (!mask.udp)
+ return -rte_errno;
+ if (!ip_proto_set &&
+ !mnl_attr_put_u8_check(buf, size,
+ TCA_FLOWER_KEY_IP_PROTO,
+ IPPROTO_UDP))
+ goto error_nobufs;
+ if (mask.udp == &mlx5_nl_flow_mask_empty.udp) {
+ ++item;
+ break;
+ }
+ spec.udp = item->spec;
+ if ((mask.udp->hdr.src_port &&
+ mask.udp->hdr.src_port != RTE_BE16(0xffff)) ||
+ (mask.udp->hdr.dst_port &&
+ mask.udp->hdr.dst_port != RTE_BE16(0xffff)))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.udp,
+ "no support for partial masks on"
+ " \"hdr.src_port\" and \"hdr.dst_port\""
+ " fields");
+ if ((mask.udp->hdr.src_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_SRC,
+ spec.udp->hdr.src_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_SRC_MASK,
+ mask.udp->hdr.src_port))) ||
+ (mask.udp->hdr.dst_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_DST,
+ spec.udp->hdr.dst_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_DST_MASK,
+ mask.udp->hdr.dst_port))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ACTIONS:
+ if (item->type != RTE_FLOW_ITEM_TYPE_END)
+ goto trans;
+ assert(na_flower);
+ assert(!na_flower_act);
+ na_flower_act =
+ mnl_attr_nest_start_check(buf, size, TCA_FLOWER_ACT);
+ if (!na_flower_act)
+ goto error_nobufs;
+ act_index_cur = 1;
+ break;
+ case ACTION_VOID:
+ if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
+ goto trans;
+ ++action;
+ break;
+ case ACTION_PORT_ID:
+ if (action->type != RTE_FLOW_ACTION_TYPE_PORT_ID)
+ goto trans;
+ conf.port_id = action->conf;
+ if (conf.port_id->original)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == conf.port_id->id)
+ break;
+ if (!ptoi[i].ifindex)
+ return rte_flow_error_set
+ (error, ENODEV, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ conf.port_id,
+ "missing data to convert port ID to ifindex");
+ act_index =
+ mnl_attr_nest_start_check(buf, size, act_index_cur++);
+ if (!act_index ||
+ !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "mirred"))
+ goto error_nobufs;
+ act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
+ if (!act)
+ goto error_nobufs;
+ if (!mnl_attr_put_check(buf, size, TCA_MIRRED_PARMS,
+ sizeof(struct tc_mirred),
+ &(struct tc_mirred){
+ .action = TC_ACT_STOLEN,
+ .eaction = TCA_EGRESS_REDIR,
+ .ifindex = ptoi[i].ifindex,
+ }))
+ goto error_nobufs;
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ ++action;
+ break;
+ case ACTION_DROP:
+ if (action->type != RTE_FLOW_ACTION_TYPE_DROP)
+ goto trans;
+ act_index =
+ mnl_attr_nest_start_check(buf, size, act_index_cur++);
+ if (!act_index ||
+ !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "gact"))
+ goto error_nobufs;
+ act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
+ if (!act)
+ goto error_nobufs;
+ if (!mnl_attr_put_check(buf, size, TCA_GACT_PARMS,
+ sizeof(struct tc_gact),
+ &(struct tc_gact){
+ .action = TC_ACT_SHOT,
+ }))
+ goto error_nobufs;
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ ++action;
+ break;
+ case ACTION_OF_POP_VLAN:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_POP_VLAN)
+ goto trans;
+ conf.of_push_vlan = NULL;
+ i = TCA_VLAN_ACT_POP;
+ goto action_of_vlan;
+ case ACTION_OF_PUSH_VLAN:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
+ goto trans;
+ conf.of_push_vlan = action->conf;
+ i = TCA_VLAN_ACT_PUSH;
+ goto action_of_vlan;
+ case ACTION_OF_SET_VLAN_VID:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
+ goto trans;
+ conf.of_set_vlan_vid = action->conf;
+ if (na_vlan_id)
+ goto override_na_vlan_id;
+ i = TCA_VLAN_ACT_MODIFY;
+ goto action_of_vlan;
+ case ACTION_OF_SET_VLAN_PCP:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
+ goto trans;
+ conf.of_set_vlan_pcp = action->conf;
+ if (na_vlan_priority)
+ goto override_na_vlan_priority;
+ i = TCA_VLAN_ACT_MODIFY;
+ goto action_of_vlan;
+action_of_vlan:
+ act_index =
+ mnl_attr_nest_start_check(buf, size, act_index_cur++);
+ if (!act_index ||
+ !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "vlan"))
+ goto error_nobufs;
+ act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
+ if (!act)
+ goto error_nobufs;
+ if (!mnl_attr_put_check(buf, size, TCA_VLAN_PARMS,
+ sizeof(struct tc_vlan),
+ &(struct tc_vlan){
+ .action = TC_ACT_PIPE,
+ .v_action = i,
+ }))
+ goto error_nobufs;
+ if (i == TCA_VLAN_ACT_POP) {
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ ++action;
+ break;
+ }
+ if (i == TCA_VLAN_ACT_PUSH &&
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_VLAN_PUSH_VLAN_PROTOCOL,
+ conf.of_push_vlan->ethertype))
+ goto error_nobufs;
+ na_vlan_id = mnl_nlmsg_get_payload_tail(buf);
+ if (!mnl_attr_put_u16_check(buf, size, TCA_VLAN_PAD, 0))
+ goto error_nobufs;
+ na_vlan_priority = mnl_nlmsg_get_payload_tail(buf);
+ if (!mnl_attr_put_u8_check(buf, size, TCA_VLAN_PAD, 0))
+ goto error_nobufs;
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
+override_na_vlan_id:
+ na_vlan_id->nla_type = TCA_VLAN_PUSH_VLAN_ID;
+ *(uint16_t *)mnl_attr_get_payload(na_vlan_id) =
+ rte_be_to_cpu_16
+ (conf.of_set_vlan_vid->vlan_vid);
+ } else if (action->type ==
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
+override_na_vlan_priority:
+ na_vlan_priority->nla_type =
+ TCA_VLAN_PUSH_VLAN_PRIORITY;
+ *(uint8_t *)mnl_attr_get_payload(na_vlan_priority) =
+ conf.of_set_vlan_pcp->vlan_pcp;
+ }
+ ++action;
+ break;
+ case END:
+ if (item->type != RTE_FLOW_ITEM_TYPE_END ||
+ action->type != RTE_FLOW_ACTION_TYPE_END)
+ goto trans;
+ if (na_flower_act)
+ mnl_attr_nest_end(buf, na_flower_act);
+ if (na_flower)
+ mnl_attr_nest_end(buf, na_flower);
+ nlh = buf;
+ return nlh->nlmsg_len;
+ }
+ back = trans;
+ trans = mlx5_nl_flow_trans[trans[n - 1]];
+ n = 0;
+ goto trans;
+error_nobufs:
+ if (buf != buf_tmp) {
+ buf = buf_tmp;
+ size = sizeof(buf_tmp);
+ goto init;
+ }
+ return rte_flow_error_set
+ (error, ENOBUFS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "generated TC message is too large");
+}
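
Because the returned length is exact regardless of @p size, the function can be driven snprintf-style. A sketch, assuming attr/pattern/actions describe a valid rule and that DPDK port 0 maps to a made-up ifindex 42:

struct mlx5_nl_flow_ptoi ptoi[] = {
	{ .port_id = 0, .ifindex = 42 }, /* hypothetical mapping */
	{ .ifindex = 0 },                /* table terminator */
};
struct rte_flow_error error;
void *buf;
int len;

/* First pass measures; a NULL output buffer with size 0 is allowed. */
len = mlx5_nl_flow_transpose(NULL, 0, ptoi, attr, pattern, actions,
			     &error);
if (len < 0)
	return len;
buf = malloc(len);
len = mlx5_nl_flow_transpose(buf, len, ptoi, attr, pattern, actions,
			     &error);
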
+
+/**
+ * Brand rtnetlink buffer with unique handle.
+ *
+ * This handle should be unique for a given network interface to avoid
+ * collisions.
+ *
+ * @param buf
+ * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
+ * @param handle
+ * Unique 32-bit handle to use.
+ */
+void
+mlx5_nl_flow_brand(void *buf, uint32_t handle)
+{
+ struct tcmsg *tcm = mnl_nlmsg_get_payload(buf);
+
+ tcm->tcm_handle = handle;
+}
+
+/**
+ * Send Netlink message with acknowledgment.
+ *
+ * @param nl
+ * Libmnl socket to use.
+ * @param nlh
+ * Message to send. This function always raises the NLM_F_ACK flag before
+ * sending.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_flow_nl_ack(struct mnl_socket *nl, struct nlmsghdr *nlh)
+{
+ alignas(struct nlmsghdr)
+ uint8_t ans[mnl_nlmsg_size(sizeof(struct nlmsgerr)) +
+ nlh->nlmsg_len - sizeof(*nlh)];
+ uint32_t seq = random();
+ int ret;
+
+ nlh->nlmsg_flags |= NLM_F_ACK;
+ nlh->nlmsg_seq = seq;
+ ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
+ if (ret != -1)
+ ret = mnl_socket_recvfrom(nl, ans, sizeof(ans));
+ if (ret != -1)
+ ret = mnl_cb_run
+ (ans, ret, seq, mnl_socket_get_portid(nl), NULL, NULL);
+ if (!ret)
+ return 0;
+ rte_errno = errno;
+ return -rte_errno;
+}
+
+/**
+ * Create a Netlink flow rule.
+ *
+ * @param nl
+ * Libmnl socket to use.
+ * @param buf
+ * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_create(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error)
+{
+ struct nlmsghdr *nlh = buf;
+
+ nlh->nlmsg_type = RTM_NEWTFILTER;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ if (!mlx5_nl_flow_nl_ack(nl, nlh))
+ return 0;
+ return rte_flow_error_set
+ (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "netlink: failed to create TC flow rule");
+}
+
+/**
+ * Destroy a Netlink flow rule.
+ *
+ * @param nl
+ * Libmnl socket to use.
+ * @param buf
+ * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error)
+{
+ struct nlmsghdr *nlh = buf;
+
+ nlh->nlmsg_type = RTM_DELTFILTER;
+ nlh->nlmsg_flags = NLM_F_REQUEST;
+ if (!mlx5_nl_flow_nl_ack(nl, nlh))
+ return 0;
+ return rte_flow_error_set
+ (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "netlink: failed to destroy TC flow rule");
+}
+
+/**
+ * Initialize ingress qdisc of a given network interface.
+ *
+ * @param nl
+ * Libmnl socket of the @p NETLINK_ROUTE kind.
+ * @param ifindex
+ * Index of network interface to initialize.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex,
+ struct rte_flow_error *error)
+{
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+ alignas(struct nlmsghdr)
+ uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)];
+
+ /* Destroy existing ingress qdisc and everything attached to it. */
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = RTM_DELQDISC;
+ nlh->nlmsg_flags = NLM_F_REQUEST;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ifindex;
+ tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ tcm->tcm_parent = TC_H_INGRESS;
+ /* Ignore errors when qdisc is already absent. */
+ if (mlx5_nl_flow_nl_ack(nl, nlh) &&
+ rte_errno != EINVAL && rte_errno != ENOENT)
+ return rte_flow_error_set
+ (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "netlink: failed to remove ingress qdisc");
+ /* Create fresh ingress qdisc. */
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = RTM_NEWQDISC;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ifindex;
+ tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ tcm->tcm_parent = TC_H_INGRESS;
+ mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress");
+ if (mlx5_nl_flow_nl_ack(nl, nlh))
+ return rte_flow_error_set
+ (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "netlink: failed to create ingress qdisc");
+ return 0;
+}
+
+/**
+ * Create and configure a libmnl socket for Netlink flow rules.
+ *
+ * @return
+ * A valid libmnl socket object pointer on success, NULL otherwise and
+ * rte_errno is set.
+ */
+struct mnl_socket *
+mlx5_nl_flow_socket_create(void)
+{
+ struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE);
+
+ if (nl) {
+ mnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 },
+ sizeof(int));
+ if (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID))
+ return nl;
+ }
+ rte_errno = errno;
+ if (nl)
+ mnl_socket_close(nl);
+ return NULL;
+}
+
+/**
+ * Destroy a libmnl socket.
+ */
+void
+mlx5_nl_flow_socket_destroy(struct mnl_socket *nl)
+{
+ mnl_socket_close(nl);
+}
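
Taken together, a plausible life cycle for one rule, assuming msg is a buffer produced by mlx5_nl_flow_transpose() as sketched earlier and ifindex identifies the target port:

struct mnl_socket *nl = mlx5_nl_flow_socket_create();
struct rte_flow_error error;

if (!nl)
	return -rte_errno;
/* One-time per port: (re)create the ingress qdisc. */
if (mlx5_nl_flow_init(nl, ifindex, &error))
	goto out;
mlx5_nl_flow_brand(msg, 1); /* caller-chosen unique handle */
if (mlx5_nl_flow_create(nl, msg, &error))
	goto out;
/* ... rule active; remove it when done ... */
mlx5_nl_flow_destroy(nl, msg, &error);
out:
	mlx5_nl_flow_socket_destroy(nl);
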
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 0cf370cd..0870d32f 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -21,6 +21,9 @@
#include <rte_vect.h>
#include "mlx5_autoconf.h"
+/* RSS hash key size. */
+#define MLX5_RSS_HASH_KEY_LEN 40
+
/* Get CQE owner bit. */
#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)
@@ -240,7 +243,9 @@ struct mlx5_cqe {
uint8_t padding[64];
#endif
uint8_t pkt_info;
- uint8_t rsvd0[11];
+ uint8_t rsvd0;
+ uint16_t wqe_id;
+ uint8_t rsvd3[8];
uint32_t rx_hash_res;
uint8_t rx_hash_type;
uint8_t rsvd1[11];
@@ -285,7 +290,10 @@ struct mlx5_cqe {
struct mlx5_mini_cqe8 {
union {
uint32_t rx_hash_result;
- uint32_t checksum;
+ struct {
+ uint16_t checksum;
+ uint16_t stride_idx;
+ };
struct {
uint16_t wqe_counter;
uint8_t s_wqe_opcode;
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index d69b4c09..b95778a8 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -50,10 +50,11 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
return -rte_errno;
}
if (rss_conf->rss_key && rss_conf->rss_key_len) {
- if (rss_conf->rss_key_len != rss_hash_default_key_len) {
+ if (rss_conf->rss_key_len != MLX5_RSS_HASH_KEY_LEN) {
DRV_LOG(ERR,
- "port %u RSS key len must be %zu Bytes long",
- dev->data->port_id, rss_hash_default_key_len);
+ "port %u RSS key len must be %s Bytes long",
+ dev->data->port_id,
+ RTE_STR(MLX5_RSS_HASH_KEY_LEN));
rte_errno = EINVAL;
return -rte_errno;
}
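
The key length is now a compile-time constant, so the log message can be
assembled by the preprocessor: RTE_STR() (from rte_common.h) expands the
macro and stringifies the result, so the %s conversion prints it without a
runtime %zu format. For illustration:

    #include <stdio.h>
    #include <rte_common.h>

    #define MLX5_RSS_HASH_KEY_LEN 40
    /* RTE_STR(MLX5_RSS_HASH_KEY_LEN) expands to the string literal "40". */
    printf("RSS key len must be %s bytes\n", RTE_STR(MLX5_RSS_HASH_KEY_LEN));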
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 80824bc4..e74fdef8 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -32,10 +32,18 @@
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret;
dev->data->promiscuous = 1;
- if (((struct priv *)dev->data->dev_private)->config.vf)
+ if (priv->isolated) {
+ DRV_LOG(WARNING,
+ "port %u cannot enable promiscuous mode"
+ " in flow isolation mode",
+ dev->data->port_id);
+ return;
+ }
+ if (priv->config.vf)
mlx5_nl_promisc(dev, 1);
ret = mlx5_traffic_restart(dev);
if (ret)
@@ -52,10 +60,11 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret;
dev->data->promiscuous = 0;
- if (((struct priv *)dev->data->dev_private)->config.vf)
+ if (priv->config.vf)
mlx5_nl_promisc(dev, 0);
ret = mlx5_traffic_restart(dev);
if (ret)
@@ -72,10 +81,18 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret;
dev->data->all_multicast = 1;
- if (((struct priv *)dev->data->dev_private)->config.vf)
+ if (priv->isolated) {
+ DRV_LOG(WARNING,
+ "port %u cannot enable allmulticast mode"
+ " in flow isolation mode",
+ dev->data->port_id);
+ return;
+ }
+ if (priv->config.vf)
mlx5_nl_allmulti(dev, 1);
ret = mlx5_traffic_restart(dev);
if (ret)
@@ -92,10 +109,11 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret;
dev->data->all_multicast = 0;
- if (((struct priv *)dev->data->dev_private)->config.vf)
+ if (priv->config.vf)
mlx5_nl_allmulti(dev, 0);
ret = mlx5_traffic_restart(dev);
if (ret)
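
These guards implement the flow isolation contract: once an application
enables isolation, only traffic matching its explicit flow rules may reach
the port, so the PMD must refuse global Rx modes instead of silently
breaking isolation. A hedged sketch from the application side:

    struct rte_flow_error error;

    /* Enable isolated mode (typically before the port is configured). */
    if (rte_flow_isolate(port_id, 1, &error) == 0) {
            /* From now on, mlx5 rejects promiscuous/allmulticast
             * requests with the warnings added above. */
    }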
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index de3f869e..1f7bfd44 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -52,7 +52,9 @@ uint8_t rss_hash_default_key[] = {
};
/* Length of the default RSS hash key. */
-const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
+static_assert(MLX5_RSS_HASH_KEY_LEN ==
+ (unsigned int)sizeof(rss_hash_default_key),
+ "wrong RSS default key size.");
/**
* Check whether Multi-Packet RQ can be enabled for the device.
@@ -386,8 +388,10 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_JUMBO_FRAME);
+ offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
if (config->hw_fcs_strip)
- offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+
if (config->hw_csum)
offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
@@ -643,7 +647,8 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
doorbell = (uint64_t)doorbell_hi << 32;
doorbell |= rxq->cqn;
rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
- rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
+ mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
+ cq_db_reg, rxq->uar_lock_cq);
}
/**
@@ -818,7 +823,13 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (config->cqe_comp && !rxq_data->hw_timestamp) {
attr.cq.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ attr.cq.mlx5.cqe_comp_res_format =
+ mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
+ MLX5DV_CQE_RES_FORMAT_HASH;
+#else
attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+#endif
/*
* For vectorized Rx, it must not be doubled in order to
* make cq_ci and rq_ci aligned.
@@ -976,7 +987,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
rxq_data->rq_db = rwq.dbrec;
rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
rxq_data->cq_ci = 0;
- rxq_data->strd_ci = 0;
+ rxq_data->consumed_strd = 0;
rxq_data->rq_pi = 0;
rxq_data->zip = (struct rxq_zip){
.ai = 0,
@@ -993,8 +1004,6 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
idx, (void *)&tmpl);
rte_atomic32_inc(&tmpl->refcnt);
- DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
- dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
@@ -1036,9 +1045,6 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->ibv) {
rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
- DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
- dev->data->port_id, rxq_ctrl->idx,
- rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
}
return rxq_ctrl->ibv;
}
@@ -1058,9 +1064,6 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
assert(rxq_ibv);
assert(rxq_ibv->wq);
assert(rxq_ibv->cq);
- DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
- PORT_ID(rxq_ibv->rxq_ctrl->priv),
- rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
rxq_free_elts(rxq_ibv->rxq_ctrl);
claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
@@ -1231,6 +1234,13 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
*/
desc *= 4;
obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
+ /*
+ * rte_mempool_create_empty() has a sanity check that refuses a cache
+ * size which is too large relative to the number of elements.
+ * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the constant
+ * 2 is used here instead.
+ */
+ obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
/* Check if a mempool is already allocated and whether it can be reused. */
if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
DRV_LOG(DEBUG, "port %u mempool %s is being reused",
@@ -1346,7 +1356,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
sizeof(struct rte_mbuf_ext_shared_info) +
RTE_PKTMBUF_HEADROOM;
if (mprq_en &&
- desc >= (1U << config->mprq.stride_num_n) &&
+ desc > (1U << config->mprq.stride_num_n) &&
mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
/* TODO: Rx scatter isn't supported yet. */
tmpl->rxq.sges_n = 0;
@@ -1401,6 +1411,14 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
dev->data->dev_conf.rxmode.max_rx_pkt_len,
mb_len - RTE_PKTMBUF_HEADROOM);
}
+ if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
+ DRV_LOG(WARNING,
+ "port %u MPRQ is requested but cannot be enabled"
+ " (requested: desc = %u, stride_sz = %u,"
+ " supported: min_stride_num = %u, max_stride_sz = %u).",
+ dev->data->port_id, desc, mprq_stride_size,
+ (1 << config->mprq.stride_num_n),
+ (1 << config->mprq.max_stride_size_n));
DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
@@ -1419,17 +1437,17 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/* Configure VLAN stripping. */
tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
- if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
- tmpl->rxq.crc_present = 0;
- } else if (config->hw_fcs_strip) {
- tmpl->rxq.crc_present = 1;
- } else {
- DRV_LOG(WARNING,
- "port %u CRC stripping has been disabled but will"
- " still be performed by hardware, make sure MLNX_OFED"
- " and firmware are up to date",
- dev->data->port_id);
- tmpl->rxq.crc_present = 0;
+ tmpl->rxq.crc_present = 0;
+ if (rte_eth_dev_must_keep_crc(offloads)) {
+ if (config->hw_fcs_strip) {
+ tmpl->rxq.crc_present = 1;
+ } else {
+ DRV_LOG(WARNING,
+ "port %u CRC stripping has been disabled but will"
+ " still be performed by hardware, make sure MLNX_OFED"
+ " and firmware are up to date",
+ dev->data->port_id);
+ }
}
DRV_LOG(DEBUG,
"port %u CRC stripping is %s, %u bytes will be subtracted from"
@@ -1447,10 +1465,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.elts =
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
+#ifndef RTE_ARCH_64
+ tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
+#endif
tmpl->idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
- idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
@@ -1481,9 +1500,6 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
rxq);
mlx5_rxq_ibv_get(dev, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
- DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
- dev->data->port_id, rxq_ctrl->idx,
- rte_atomic32_read(&rxq_ctrl->refcnt));
}
return rxq_ctrl;
}
@@ -1511,8 +1527,6 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
assert(rxq_ctrl->priv);
if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
rxq_ctrl->ibv = NULL;
- DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
- rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
LIST_REMOVE(rxq_ctrl, next);
@@ -1630,14 +1644,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
}
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
- dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
- rte_atomic32_read(&ind_tbl->refcnt));
return ind_tbl;
error:
rte_free(ind_tbl);
- DRV_LOG(DEBUG, "port %u cannot create indirection table",
- dev->data->port_id);
+ DEBUG("port %u cannot create indirection table", dev->data->port_id);
return NULL;
}
@@ -1672,9 +1682,6 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
unsigned int i;
rte_atomic32_inc(&ind_tbl->refcnt);
- DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
- dev->data->port_id, (void *)ind_tbl,
- rte_atomic32_read(&ind_tbl->refcnt));
for (i = 0; i != ind_tbl->queues_n; ++i)
mlx5_rxq_get(dev, ind_tbl->queues[i]);
}
@@ -1698,15 +1705,9 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
{
unsigned int i;
- DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
- dev->data->port_id, (void *)ind_tbl,
- rte_atomic32_read(&ind_tbl->refcnt));
- if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
+ if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
claim_zero(mlx5_glue->destroy_rwq_ind_table
(ind_tbl->ind_table));
- DEBUG("port %u delete indirection table %p: queues: %u",
- dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
- }
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
@@ -1758,10 +1759,6 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
- * @param tunnel
- * Tunnel type, implies tunnel offloading like inner checksum if available.
- * @param rss_level
- * RSS hash on tunnel level.
*
* @return
* The Verbs object initialised, NULL otherwise and rte_errno is set.
@@ -1771,16 +1768,13 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
- uint32_t tunnel, uint32_t rss_level)
+ int tunnel __rte_unused)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
int err;
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- struct mlx5dv_qp_init_attr qp_init_attr = {0};
-#endif
queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
@@ -1791,15 +1785,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
return NULL;
}
if (!rss_key_len) {
- rss_key_len = rss_hash_default_key_len;
+ rss_key_len = MLX5_RSS_HASH_KEY_LEN;
rss_key = rss_hash_default_key;
}
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (tunnel) {
- qp_init_attr.comp_mask =
- MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
- qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
- }
qp = mlx5_glue->dv_create_qp
(priv->ctx,
&(struct ibv_qp_init_attr_ex){
@@ -1811,25 +1800,20 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
.rx_hash_conf = (struct ibv_rx_hash_conf){
.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
.rx_hash_key_len = rss_key_len ? rss_key_len :
- rss_hash_default_key_len,
+ MLX5_RSS_HASH_KEY_LEN,
.rx_hash_key = rss_key ?
(void *)(uintptr_t)rss_key :
rss_hash_default_key,
- .rx_hash_fields_mask = hash_fields |
- (tunnel && rss_level > 1 ?
- (uint32_t)IBV_RX_HASH_INNER : 0),
+ .rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
.pd = priv->pd,
},
- &qp_init_attr);
- DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
- " tunnel:0x%x level:%u dv_attr:comp_mask:0x%" PRIx64
- " create_flags:0x%x",
- dev->data->port_id, (void *)qp, (void *)ind_tbl,
- (tunnel && rss_level == 2 ? (uint32_t)IBV_RX_HASH_INNER : 0) |
- hash_fields, tunnel, rss_level,
- qp_init_attr.comp_mask, qp_init_attr.create_flags);
+ &(struct mlx5dv_qp_init_attr){
+ .comp_mask = tunnel ?
+ MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS : 0,
+ .create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS,
+ });
#else
qp = mlx5_glue->create_qp_ex
(priv->ctx,
@@ -1842,7 +1826,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
.rx_hash_conf = (struct ibv_rx_hash_conf){
.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
.rx_hash_key_len = rss_key_len ? rss_key_len :
- rss_hash_default_key_len,
+ MLX5_RSS_HASH_KEY_LEN,
.rx_hash_key = rss_key ?
(void *)(uintptr_t)rss_key :
rss_hash_default_key,
@@ -1851,10 +1835,6 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
.rwq_ind_tbl = ind_tbl->ind_table,
.pd = priv->pd,
});
- DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
- " tunnel:0x%x level:%hhu",
- dev->data->port_id, (void *)qp, (void *)ind_tbl,
- hash_fields, tunnel, rss_level);
#endif
if (!qp) {
rte_errno = errno;
@@ -1867,14 +1847,9 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
hrxq->qp = qp;
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = hash_fields;
- hrxq->tunnel = tunnel;
- hrxq->rss_level = rss_level;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
- DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
- dev->data->port_id, (void *)hrxq,
- rte_atomic32_read(&hrxq->refcnt));
return hrxq;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
@@ -1897,10 +1872,6 @@ error:
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
- * @param tunnel
- * Tunnel type, implies tunnel offloading like inner checksum if available.
- * @param rss_level
- * RSS hash on tunnel level
*
* @return
* An hash Rx queue on success.
@@ -1909,8 +1880,7 @@ struct mlx5_hrxq *
mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n,
- uint32_t tunnel, uint32_t rss_level)
+ const uint16_t *queues, uint32_t queues_n)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
@@ -1925,10 +1895,6 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
continue;
if (hrxq->hash_fields != hash_fields)
continue;
- if (hrxq->tunnel != tunnel)
- continue;
- if (hrxq->rss_level != rss_level)
- continue;
ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
if (!ind_tbl)
continue;
@@ -1937,9 +1903,6 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
- DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
- dev->data->port_id, (void *)hrxq,
- rte_atomic32_read(&hrxq->refcnt));
return hrxq;
}
return NULL;
@@ -1959,15 +1922,8 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
int
mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
- DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
- dev->data->port_id, (void *)hrxq,
- rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:"
- " 0x%x, level: %u",
- dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
- hrxq->tunnel, hrxq->rss_level);
mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
@@ -2001,3 +1957,235 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
}
return ret;
}
+
+/**
+ * Create a drop Rx queue Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ibv_cq *cq;
+ struct ibv_wq *wq = NULL;
+ struct mlx5_rxq_ibv *rxq;
+
+ if (priv->drop_queue.rxq)
+ return priv->drop_queue.rxq;
+ cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
+ if (!cq) {
+ DEBUG("port %u cannot allocate CQ for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ wq = mlx5_glue->create_wq(priv->ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
+ .pd = priv->pd,
+ .cq = cq,
+ });
+ if (!wq) {
+ DEBUG("port %u cannot allocate WQ for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
+ if (!rxq) {
+ DEBUG("port %u cannot allocate drop Rx queue memory",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ rxq->cq = cq;
+ rxq->wq = wq;
+ priv->drop_queue.rxq = rxq;
+ return rxq;
+error:
+ if (wq)
+ claim_zero(mlx5_glue->destroy_wq(wq));
+ if (cq)
+ claim_zero(mlx5_glue->destroy_cq(cq));
+ return NULL;
+}
+
+/**
+ * Release a drop Rx queue Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
+
+ if (rxq->wq)
+ claim_zero(mlx5_glue->destroy_wq(rxq->wq));
+ if (rxq->cq)
+ claim_zero(mlx5_glue->destroy_cq(rxq->cq));
+ rte_free(rxq);
+ priv->drop_queue.rxq = NULL;
+}
+
+/**
+ * Create a drop indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_rxq_ibv *rxq;
+ struct mlx5_ind_table_ibv tmpl;
+
+ rxq = mlx5_rxq_ibv_drop_new(dev);
+ if (!rxq)
+ return NULL;
+ tmpl.ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = 0,
+ .ind_tbl = &rxq->wq,
+ .comp_mask = 0,
+ });
+ if (!tmpl.ind_table) {
+ DEBUG("port %u cannot allocate indirection table for drop"
+ " queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ ind_tbl->ind_table = tmpl.ind_table;
+ return ind_tbl;
+error:
+ mlx5_rxq_ibv_drop_release(dev);
+ return NULL;
+}
+
+/**
+ * Release a drop indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
+
+ claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
+ mlx5_rxq_ibv_drop_release(dev);
+ rte_free(ind_tbl);
+ priv->drop_queue.hrxq->ind_table = NULL;
+}
+
+/**
+ * Create a drop Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_hrxq *
+mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ struct ibv_qp *qp;
+ struct mlx5_hrxq *hrxq;
+
+ if (priv->drop_queue.hrxq) {
+ rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
+ return priv->drop_queue.hrxq;
+ }
+ ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+ if (!ind_tbl)
+ return NULL;
+ qp = mlx5_glue->create_qp_ex(priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function =
+ IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
+ .rx_hash_key = rss_hash_default_key,
+ .rx_hash_fields_mask = 0,
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->pd
+ });
+ if (!qp) {
+ DEBUG("port %u cannot allocate QP for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+ if (!hrxq) {
+ DRV_LOG(WARNING,
+ "port %u cannot allocate memory for drop queue",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ hrxq->ind_table = ind_tbl;
+ hrxq->qp = qp;
+ priv->drop_queue.hrxq = hrxq;
+ rte_atomic32_set(&hrxq->refcnt, 1);
+ return hrxq;
+error:
+ if (ind_tbl)
+ mlx5_ind_table_ibv_drop_release(dev);
+ return NULL;
+}
+
+/**
+ * Release a drop hash Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+
+ if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+ claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+ mlx5_ind_table_ibv_drop_release(dev);
+ rte_free(hrxq);
+ priv->drop_queue.hrxq = NULL;
+ }
+}
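
These drop objects form a per-port, reference-counted singleton: the first
mlx5_hrxq_drop_new() call builds the whole CQ -> WQ -> indirection table ->
QP chain, later calls only take a reference. A minimal sketch of how a flow
rule with a drop action might use it (error handling elided):

    struct mlx5_hrxq *hrxq = mlx5_hrxq_drop_new(dev);

    if (!hrxq)
            return -rte_errno;
    /* ... reference hrxq->qp from the Verbs flow attached to the rule ... */
    mlx5_hrxq_drop_release(dev);    /* drop the reference when done */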
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 52785946..2d14f8a6 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -38,7 +38,7 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
static __rte_always_inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
- uint16_t cqe_cnt, uint32_t *rss_hash);
+ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
static __rte_always_inline uint32_t
rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
@@ -495,6 +495,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
unsigned int segs_n = 0;
const unsigned int max_inline = txq->max_inline;
+ uint64_t addr_64;
if (unlikely(!pkts_n))
return 0;
@@ -503,8 +504,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
@@ -711,12 +710,12 @@ pkt_inline:
ds = 3;
use_dseg:
/* Add the remaining packet as a simple ds. */
- addr = rte_cpu_to_be_64(addr);
+ addr_64 = rte_cpu_to_be_64(addr);
*dseg = (rte_v128u32_t){
rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
- addr,
- addr >> 32,
+ addr_64,
+ addr_64 >> 32,
};
++ds;
if (!segs_n)
@@ -750,12 +749,12 @@ next_seg:
total_length += length;
#endif
/* Store segment information. */
- addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
+ addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
*dseg = (rte_v128u32_t){
rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
- addr,
- addr >> 32,
+ addr_64,
+ addr_64 >> 32,
};
(*txq->elts)[++elts_head & elts_m] = buf;
if (--segs_n)
@@ -816,14 +815,13 @@ next_wqe:
/* Check whether completion threshold has been reached. */
comp = txq->elts_comp + i + j + k;
if (comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
last_wqe->ctrl2 = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
last_wqe->ctrl3 = txq->elts_head;
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
} else {
txq->elts_comp = comp;
}
@@ -942,8 +940,6 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
@@ -1032,14 +1028,13 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (comp >= MLX5_TX_COMP_THRESH) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
} else {
txq->elts_comp = comp;
}
@@ -1171,8 +1166,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
do {
struct rte_mbuf *buf = *(pkts++);
uintptr_t addr;
@@ -1329,14 +1322,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
if (comp >= MLX5_TX_COMP_THRESH) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
} else {
txq->elts_comp = comp;
}
@@ -1450,6 +1442,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
unsigned int mpw_room = 0;
unsigned int inl_pad = 0;
uint32_t inl_hdr;
+ uint64_t addr_64;
struct mlx5_mpw mpw = {
.state = MLX5_MPW_STATE_CLOSED,
};
@@ -1459,8 +1452,6 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
@@ -1586,13 +1577,13 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
((uintptr_t)mpw.data.raw +
inl_pad);
(*txq->elts)[elts_head++ & elts_m] = buf;
- addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
- uintptr_t));
+ addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
+ uintptr_t));
*dseg = (rte_v128u32_t) {
rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
- addr,
- addr >> 32,
+ addr_64,
+ addr_64 >> 32,
};
mpw.data.raw = (volatile void *)(dseg + 1);
mpw.total_len += (inl_pad + sizeof(*dseg));
@@ -1616,15 +1607,14 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
(1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
txq->mpw_comp = txq->wqe_ci;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
} else {
txq->elts_comp += j;
}
@@ -1722,8 +1712,9 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
* Pointer to RX queue.
* @param cqe
* CQE to process.
- * @param[out] rss_hash
- * Packet RSS Hash result.
+ * @param[out] mcqe
+ * Where to store the mini-CQE pointer if the CQE is compressed; left
+ * unwritten otherwise.
*
* @return
* Packet size in bytes (0 if there is none), -1 in case of completion
* with error.
*/
*/
static inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
- uint16_t cqe_cnt, uint32_t *rss_hash)
+ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
{
struct rxq_zip *zip = &rxq->zip;
uint16_t cqe_n = cqe_cnt + 1;
@@ -1745,7 +1736,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
- *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
+ *mcqe = &(*mc)[zip->ai & 7];
if ((++zip->ai & 7) == 0) {
/* Invalidate consumed CQEs */
idx = zip->ca;
@@ -1810,7 +1801,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
/* Get packet size to return. */
len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
- *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
+ *mcqe = &(*mc)[0];
zip->ai = 1;
/* Prefetch all the entries to be invalidated */
idx = zip->ca;
@@ -1821,7 +1812,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
}
} else {
len = rte_be_to_cpu_32(cqe->byte_cnt);
- *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
}
/* Error while receiving packet. */
if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
@@ -1934,7 +1924,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
volatile struct mlx5_wqe_data_seg *wqe =
&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
struct rte_mbuf *rep = (*rxq->elts)[idx];
- uint32_t rss_hash_res = 0;
+ volatile struct mlx5_mini_cqe8 *mcqe = NULL;
+ uint32_t rss_hash_res;
if (pkt)
NEXT(seg) = rep;
@@ -1964,8 +1955,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
if (!pkt) {
cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
- len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
- &rss_hash_res);
+ len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
if (!len) {
rte_mbuf_raw_free(rep);
break;
@@ -1979,6 +1969,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
pkt = seg;
assert(len >= (rxq->crc_present << 2));
pkt->ol_flags = 0;
+ /* If compressed, take hash result from mini-CQE. */
+ rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
+ cqe->rx_hash_res :
+ mcqe->rx_hash_result);
rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
if (rxq->crc_present)
len -= ETHER_CRC_LEN;
@@ -2104,7 +2098,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
unsigned int i = 0;
uint16_t rq_ci = rxq->rq_ci;
- uint16_t strd_idx = rxq->strd_ci;
+ uint16_t consumed_strd = rxq->consumed_strd;
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
while (i < pkts_n) {
@@ -2112,12 +2106,14 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
void *addr;
int ret;
unsigned int len;
- uint16_t consumed_strd;
+ uint16_t strd_cnt;
+ uint16_t strd_idx;
uint32_t offset;
uint32_t byte_cnt;
+ volatile struct mlx5_mini_cqe8 *mcqe = NULL;
uint32_t rss_hash_res = 0;
- if (strd_idx == strd_n) {
+ if (consumed_strd == strd_n) {
/* Replace WQE only if the buffer is still in use. */
if (rte_atomic16_read(&buf->refcnt) > 1) {
mprq_buf_replace(rxq, rq_ci & wq_mask);
@@ -2137,12 +2133,12 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
rxq->mprq_repl = rep;
}
/* Advance to the next WQE. */
- strd_idx = 0;
+ consumed_strd = 0;
++rq_ci;
buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
}
cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
- ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &rss_hash_res);
+ ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
if (!ret)
break;
if (unlikely(ret == -1)) {
@@ -2151,14 +2147,21 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
continue;
}
byte_cnt = ret;
- consumed_strd = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
- MLX5_MPRQ_STRIDE_NUM_SHIFT;
- assert(consumed_strd);
- /* Calculate offset before adding up stride index. */
- offset = strd_idx * strd_sz + strd_shift;
- strd_idx += consumed_strd;
+ strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
+ MLX5_MPRQ_STRIDE_NUM_SHIFT;
+ assert(strd_cnt);
+ consumed_strd += strd_cnt;
if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
continue;
+ if (mcqe == NULL) {
+ rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
+ strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
+ } else {
+ /* mini-CQE for MPRQ doesn't have hash result. */
+ strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
+ }
+ assert(strd_idx < strd_n);
+ assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
/*
* Currently configured to receive a packet per stride. But if
* MTU is adjusted through kernel interface, device could
@@ -2166,7 +2169,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
* case, the packet should be dropped because it is bigger than
* the max_rx_pkt_len.
*/
- if (unlikely(consumed_strd > 1)) {
+ if (unlikely(strd_cnt > 1)) {
++rxq->stats.idropped;
continue;
}
@@ -2179,6 +2182,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
assert((int)len >= (rxq->crc_present << 2));
if (rxq->crc_present)
len -= ETHER_CRC_LEN;
+ offset = strd_idx * strd_sz + strd_shift;
addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);
/* Initialize the offload flag. */
pkt->ol_flags = 0;
@@ -2201,7 +2205,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
} else {
rte_iova_t buf_iova;
struct rte_mbuf_ext_shared_info *shinfo;
- uint16_t buf_len = consumed_strd * strd_sz;
+ uint16_t buf_len = strd_cnt * strd_sz;
/* Increment the refcnt of the whole chunk. */
rte_atomic16_add_return(&buf->refcnt, 1);
@@ -2250,7 +2254,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
++i;
}
/* Update the consumer indexes. */
- rxq->strd_ci = strd_idx;
+ rxq->consumed_strd = consumed_strd;
rte_cio_wmb();
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
if (rq_ci != rxq->rq_ci) {
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index f53bb43c..48ed2b20 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -26,6 +26,8 @@
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>
+#include <rte_spinlock.h>
+#include <rte_io.h>
#include "mlx5_utils.h"
#include "mlx5.h"
@@ -34,6 +36,9 @@
#include "mlx5_defs.h"
#include "mlx5_prm.h"
+/* Support tunnel matching. */
+#define MLX5_FLOW_TUNNEL 5
+
struct mlx5_rxq_stats {
unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
@@ -93,7 +98,7 @@ struct mlx5_rxq_data {
volatile uint32_t *cq_db;
uint16_t port_id;
uint16_t rq_ci;
- uint16_t strd_ci; /* Stride index in a WQE for Multi-Packet RQ. */
+ uint16_t consumed_strd; /* Number of consumed strides in WQE. */
uint16_t rq_pi;
uint16_t cq_ci;
struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
@@ -115,6 +120,10 @@ struct mlx5_rxq_data {
void *cq_uar; /* CQ user access region. */
uint32_t cqn; /* CQ number. */
uint8_t cq_arm_sn; /* CQ arm seq number. */
+#ifndef RTE_ARCH_64
+ rte_spinlock_t *uar_lock_cq;
+ /* CQ (UAR) access lock required for 32bit implementations */
+#endif
uint32_t tunnel; /* Tunnel information. */
} __rte_cache_aligned;
@@ -136,9 +145,10 @@ struct mlx5_rxq_ctrl {
struct priv *priv; /* Back pointer to private data. */
struct mlx5_rxq_data rxq; /* Data path structure. */
unsigned int socket; /* CPU socket ID for allocations. */
- uint32_t tunnel_types[16]; /* Tunnel type counter. */
unsigned int irq:1; /* Whether IRQ is enabled. */
uint16_t idx; /* Queue index. */
+ uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
+ uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
};
/* Indirection table. */
@@ -157,8 +167,6 @@ struct mlx5_hrxq {
struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
struct ibv_qp *qp; /* Verbs queue pair. */
uint64_t hash_fields; /* Verbs Hash fields. */
- uint32_t tunnel; /* Tunnel type. */
- uint32_t rss_level; /* RSS on tunnel level. */
uint32_t rss_key_len; /* Hash key length in bytes. */
uint8_t rss_key[]; /* Hash key. */
};
@@ -196,6 +204,10 @@ struct mlx5_txq_data {
volatile void *bf_reg; /* Blueflame register remapped. */
struct rte_mbuf *(*elts)[]; /* TX elements. */
struct mlx5_txq_stats stats; /* TX queue counters. */
+#ifndef RTE_ARCH_64
+ rte_spinlock_t *uar_lock;
+ /* UAR access lock required for 32bit implementations */
+#endif
} __rte_cache_aligned;
/* Verbs Rx queue elements. */
@@ -225,7 +237,6 @@ struct mlx5_txq_ctrl {
/* mlx5_rxq.c */
extern uint8_t rss_hash_default_key[];
-extern const size_t rss_hash_default_key_len;
int mlx5_check_mprq_support(struct rte_eth_dev *dev);
int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
@@ -245,6 +256,8 @@ struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev);
int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
@@ -265,18 +278,21 @@ struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_ibv *ind_tbl);
int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
- uint32_t tunnel, uint32_t rss_level);
+ int tunnel __rte_unused);
struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n,
- uint32_t tunnel, uint32_t rss_level);
+ const uint16_t *queues, uint32_t queues_n);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
+void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
@@ -348,6 +364,63 @@ void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
+/**
+ * Provide a safe 64bit store operation to the mlx5 UAR region for both 32bit
+ * and 64bit architectures.
+ *
+ * @param val
+ * Value to write in CPU endian format.
+ * @param addr
+ * Address to write to.
+ * @param lock
+ * Address of the lock to use for that UAR access.
+ */
+static __rte_always_inline void
+__mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr,
+ rte_spinlock_t *lock __rte_unused)
+{
+#ifdef RTE_ARCH_64
+ rte_write64_relaxed(val, addr);
+#else /* !RTE_ARCH_64 */
+ rte_spinlock_lock(lock);
+ rte_write32_relaxed(val, addr);
+ rte_io_wmb();
+ rte_write32_relaxed(val >> 32,
+ (volatile void *)((volatile char *)addr + 4));
+ rte_spinlock_unlock(lock);
+#endif
+}
+
+/**
+ * Provide a safe 64bit store operation to the mlx5 UAR region for both 32bit
+ * and 64bit architectures, while enforcing ordering with respect to the
+ * surrounding code.
+ *
+ * @param val
+ * Value to write in CPU endian format.
+ * @param addr
+ * Address to write to.
+ * @param lock
+ * Address of the lock to use for that UAR access.
+ */
+static __rte_always_inline void
+__mlx5_uar_write64(uint64_t val, volatile void *addr, rte_spinlock_t *lock)
+{
+ rte_io_wmb();
+ __mlx5_uar_write64_relaxed(val, addr, lock);
+}
+
+/* Assist macros, used instead of directly calling the functions they wrap. */
+#ifdef RTE_ARCH_64
+#define mlx5_uar_write64_relaxed(val, dst, lock) \
+ __mlx5_uar_write64_relaxed(val, dst, NULL)
+#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
+#else
+#define mlx5_uar_write64_relaxed(val, dst, lock) \
+ __mlx5_uar_write64_relaxed(val, dst, lock)
+#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
+#endif
+
#ifndef NDEBUG
/**
* Verify or set magic value in CQE.
@@ -362,7 +435,7 @@ static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
static const uint8_t magic[] = "seen";
- volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
+ volatile uint8_t (*buf)[sizeof(cqe->rsvd1)] = &cqe->rsvd1;
int ret = 1;
unsigned int i;
@@ -614,7 +687,7 @@ mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
/* Ensure ordering between DB record and BF copy. */
rte_wmb();
- *dst = *src;
+ mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
if (cond)
rte_wmb();
}
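
The doorbell paths earlier in this patch are the intended callers of these
wrappers: every former direct 64-bit UAR store goes through them so that
32-bit builds take the per-page spinlock while 64-bit builds compile it
away. The Rx CQ arming code, for instance, now reads:

    mlx5_uar_write64(rte_cpu_to_be_64(doorbell), cq_db_reg,
                     rxq->uar_lock_cq);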
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 598dc751..fb884f92 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -91,9 +91,9 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
unsigned int i;
- assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
+ assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
- assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
+ assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
/* Not to cross queue end. */
n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 71a5eaf2..b37b7381 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -107,8 +107,6 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
if (unlikely(!pkts_n))
return 0;
for (n = 0; n < pkts_n; ++n) {
@@ -176,12 +174,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
txq->elts_head = elts_head;
if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
@@ -245,8 +242,6 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
if (unlikely(!pkts_n))
@@ -282,11 +277,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
txq->elts_comp += pkts_n;
} else {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request a completion. */
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
comp_req = 8;
}
/* Fill CTRL in the header. */
@@ -739,7 +733,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 3e985d61..54b3783c 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -107,8 +107,6 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
if (unlikely(!pkts_n))
return 0;
for (n = 0; n < pkts_n; ++n) {
@@ -177,12 +175,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
txq->elts_head = elts_head;
if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
@@ -244,8 +241,6 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
@@ -283,11 +278,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
txq->elts_comp += pkts_n;
} else {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request a completion. */
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
comp_req = 8;
}
/* Fill CTRL in the header. */
@@ -724,7 +718,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index 99297d5c..a3a52291 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -36,6 +36,12 @@ mlx5_socket_init(struct rte_eth_dev *dev)
int flags;
/*
+ * Close the last socket that was used to communicate
+ * with the secondary process.
+ */
+ if (priv->primary_socket)
+ mlx5_socket_uninit(dev);
+ /*
* Initialise the socket to communicate with the secondary
* process.
*/
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 875dd102..91f3d474 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -146,7 +146,7 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
et_stats->cmd = ETHTOOL_GSTATS;
et_stats->n_stats = xstats_ctrl->stats_n;
ifr.ifr_data = (caddr_t)et_stats;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
if (ret) {
DRV_LOG(WARNING,
"port %u unable to read statistic values from device",
@@ -194,7 +194,7 @@ mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
drvinfo.cmd = ETHTOOL_GDRVINFO;
ifr.ifr_data = (caddr_t)&drvinfo;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
if (ret) {
DRV_LOG(WARNING, "port %u unable to query number of statistics",
dev->data->port_id);
@@ -244,7 +244,7 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
strings->string_set = ETH_SS_STATS;
strings->len = dev_stats_n;
ifr.ifr_data = (caddr_t)strings;
- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
if (ret) {
DRV_LOG(WARNING, "port %u unable to get statistic names",
dev->data->port_id);
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 3e7c0a90..e2a9bb70 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -46,7 +46,6 @@ mlx5_txq_start(struct rte_eth_dev *dev)
unsigned int i;
int ret;
- /* Add memory regions to Tx queues. */
for (i = 0; i != priv->txqs_n; ++i) {
struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
@@ -60,12 +59,17 @@ mlx5_txq_start(struct rte_eth_dev *dev)
}
}
ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
- if (ret)
+ if (ret) {
+ /* Adjust index for rollback. */
+ i = priv->txqs_n - 1;
goto error;
+ }
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_txq_stop(dev);
+ do {
+ mlx5_txq_release(dev, i);
+ } while (i-- != 0);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
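
The rollback loop deliberately uses a post-decrement in the condition so
that queue 0 is released as well: the body runs with the current index,
then the test consumes the old value before decrementing. A standalone
sketch of the idiom (release() is a hypothetical helper):

    unsigned int i = 2;

    do {
            release(i);             /* called with i = 2, 1, 0 */
    } while (i-- != 0);             /* stops after the i == 0 pass */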
@@ -103,8 +107,10 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
int ret = 0;
/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
- if (mlx5_mprq_alloc_mp(dev))
- goto error;
+ if (mlx5_mprq_alloc_mp(dev)) {
+ /* Do not release the Rx queues; return immediately instead. */
+ return -rte_errno;
+ }
for (i = 0; i != priv->rxqs_n; ++i) {
struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
struct rte_mempool *mp;
@@ -130,7 +136,9 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_rxq_stop(dev);
+ do {
+ mlx5_rxq_release(dev, i);
+ } while (i-- != 0);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
@@ -152,23 +160,21 @@ mlx5_dev_start(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
int ret;
- dev->data->dev_started = 1;
- DRV_LOG(DEBUG, "port %u allocating and configuring hash Rx queues",
- dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
ret = mlx5_txq_start(dev);
if (ret) {
DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
- goto error;
+ return -rte_errno;
}
ret = mlx5_rxq_start(dev);
if (ret) {
DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
dev->data->port_id, strerror(rte_errno));
- goto error;
+ mlx5_txq_stop(dev);
+ return -rte_errno;
}
- if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
- mlx5_mr_dump_dev(dev);
+ dev->data->dev_started = 1;
ret = mlx5_rx_intr_vec_enable(dev);
if (ret) {
DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
@@ -223,8 +229,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
dev->tx_pkt_burst = removed_tx_burst;
rte_wmb();
usleep(1000 * priv->rxqs_n);
- DRV_LOG(DEBUG, "port %u cleaning up and destroying hash Rx queues",
- dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
mlx5_flow_stop(dev, &priv->flows);
mlx5_traffic_disable(dev);
mlx5_rx_intr_vec_disable(dev);
@@ -302,9 +307,8 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
struct rte_flow_item_vlan vlan_spec = {
.tci = rte_cpu_to_be_16(vlan),
};
- struct rte_flow_item_vlan vlan_mask = {
- .tci = 0xffff,
- };
+ struct rte_flow_item_vlan vlan_mask =
+ rte_flow_item_vlan_mask;
ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
&vlan_spec, &vlan_mask);
@@ -341,9 +345,8 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
struct rte_flow_item_vlan vlan_spec = {
.tci = rte_cpu_to_be_16(vlan),
};
- struct rte_flow_item_vlan vlan_mask = {
- .tci = 0xffff,
- };
+ struct rte_flow_item_vlan vlan_mask =
+ rte_flow_item_vlan_mask;
ret = mlx5_ctrl_flow_vlan(dev, &unicast,
&unicast_mask,
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 691ea071..f9bc4739 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -113,15 +113,20 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
DEV_TX_OFFLOAD_TCP_CKSUM);
if (config->tso)
offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (config->swp) {
+ if (config->hw_csum)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (config->tso)
+ offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO);
+ }
+
if (config->tunnel_en) {
if (config->hw_csum)
offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
if (config->tso)
offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO);
- if (config->swp)
- offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
- DEV_TX_OFFLOAD_UDP_TNL_TSO);
}
return offloads;
}
@@ -250,6 +255,9 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
struct mlx5_txq_ctrl *txq_ctrl;
int already_mapped;
size_t page_size = sysconf(_SC_PAGESIZE);
+#ifndef RTE_ARCH_64
+ unsigned int lock_idx;
+#endif
memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
/*
@@ -276,7 +284,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
}
/* new address in reserved UAR address space. */
addr = RTE_PTR_ADD(priv->uar_base,
- uar_va & (MLX5_UAR_SIZE - 1));
+ uar_va & (uintptr_t)(MLX5_UAR_SIZE - 1));
if (!already_mapped) {
pages[pages_n++] = uar_va;
/* fixed mmap to specified address in reserved
@@ -300,6 +308,12 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
else
assert(txq_ctrl->txq.bf_reg ==
RTE_PTR_ADD((void *)addr, off));
+#ifndef RTE_ARCH_64
+ /* Assign a UAR lock according to the UAR page number. */
+ lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
+ MLX5_UAR_PAGE_NUM_MASK;
+ txq->uar_lock = &priv->uar_lock[lock_idx];
+#endif
}
return 0;
}
@@ -429,7 +443,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
/* Move the QP to this state. */
.qp_state = IBV_QPS_INIT,
/* Primary port number. */
- .port_num = priv->port
+ .port_num = 1,
};
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
(IBV_QP_STATE | IBV_QP_PORT));
@@ -506,6 +520,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
rte_atomic32_inc(&txq_ibv->refcnt);
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
+ DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%lx",
+ dev->data->port_id, txq_ctrl->uar_mmap_offset);
} else {
DRV_LOG(ERR,
"port %u failed to retrieve UAR info, invalid"
@@ -514,8 +530,6 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
rte_errno = EINVAL;
goto error;
}
- DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
- dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
txq_ibv->txq_ctrl = txq_ctrl;
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -553,12 +567,8 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
if (!(*priv->txqs)[idx])
return NULL;
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (txq_ctrl->ibv) {
+ if (txq_ctrl->ibv)
rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
- DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
- dev->data->port_id, txq_ctrl->idx,
- rte_atomic32_read(&txq_ctrl->ibv->refcnt));
- }
return txq_ctrl->ibv;
}
@@ -575,9 +585,6 @@ int
mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
{
assert(txq_ibv);
- DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
- PORT_ID(txq_ibv->txq_ctrl->priv),
- txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
@@ -716,7 +723,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
max_tso_inline);
txq_ctrl->txq.tso_en = 1;
}
- txq_ctrl->txq.tunnel_en = config->tunnel_en;
+ txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
DEV_TX_OFFLOAD_UDP_TNL_TSO |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
@@ -778,8 +785,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
- idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
error:
@@ -809,9 +814,6 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
txq);
mlx5_txq_ibv_get(dev, idx);
rte_atomic32_inc(&ctrl->refcnt);
- DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
- dev->data->port_id,
- ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
}
return ctrl;
}
@@ -837,8 +839,6 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
if (!(*priv->txqs)[idx])
return 0;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
- txq->idx, rte_atomic32_read(&txq->refcnt));
if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
txq->ibv = NULL;
if (priv->uar_base)
diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index ea6a7864..a2d0576e 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -94,6 +94,8 @@ struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
+int mrvl_logtype;
+
struct mrvl_ifnames {
const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
int idx;
@@ -206,7 +208,7 @@ mrvl_init_hif(int core_id)
ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
return ret;
}
@@ -216,7 +218,7 @@ mrvl_init_hif(int core_id)
params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
ret = pp2_hif_init(&params, &hifs[core_id]);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
return ret;
}
@@ -235,7 +237,7 @@ mrvl_get_hif(struct mrvl_priv *priv, int core_id)
ret = mrvl_init_hif(core_id);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
goto out;
}
@@ -265,7 +267,7 @@ static int
mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
{
if (rss_conf->rss_key)
- RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n");
+ MRVL_LOG(WARNING, "Changing hash key is not supported");
if (rss_conf->rss_hf == 0) {
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
@@ -307,19 +309,21 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
- RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
+ MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
dev->data->dev_conf.rxmode.mq_mode);
return -EINVAL;
}
- if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
- RTE_LOG(INFO, PMD,
- "L2 CRC stripping is always enabled in hw\n");
+ /* The KEEP_CRC offload flag is not supported by the PMD.
+ * This block can be removed once DEV_RX_OFFLOAD_CRC_STRIP is removed.
+ */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) {
+ MRVL_LOG(INFO, "L2 CRC stripping is always enabled in hw");
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
if (dev->data->dev_conf.rxmode.split_hdr_size) {
- RTE_LOG(INFO, PMD, "Split headers not supported\n");
+ MRVL_LOG(INFO, "Split headers not supported");
return -EINVAL;
}
@@ -343,7 +347,7 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
if (dev->data->nb_rx_queues == 1 &&
dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
- RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n");
+ MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
return 0;
@@ -467,7 +471,7 @@ mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
/* passing 1 enables given tx queue */
ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to start txq %d\n", queue_id);
+ MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
return ret;
}
@@ -499,7 +503,7 @@ mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
/* passing 0 disables given tx queue */
ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to stop txq %d\n", queue_id);
+ MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
return ret;
}
@@ -546,7 +550,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
priv->bpool_init_size += buffs_to_add;
ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n");
+ MRVL_LOG(ERR, "Failed to add buffers to bpool");
}
/*
@@ -561,7 +565,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to init ppio\n");
+ MRVL_LOG(ERR, "Failed to init ppio");
return ret;
}
@@ -574,8 +578,8 @@ mrvl_dev_start(struct rte_eth_dev *dev)
if (!priv->uc_mc_flushed) {
ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
if (ret) {
- RTE_LOG(ERR, PMD,
- "Failed to flush uc/mc filter list\n");
+ MRVL_LOG(ERR,
+ "Failed to flush uc/mc filter list");
goto out;
}
priv->uc_mc_flushed = 1;
@@ -584,7 +588,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
if (!priv->vlan_flushed) {
ret = pp2_ppio_flush_vlan(priv->ppio);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to flush vlan list\n");
+ MRVL_LOG(ERR, "Failed to flush vlan list");
/*
* TODO
* once pp2_ppio_flush_vlan() is supported jump to out
@@ -598,14 +602,14 @@ mrvl_dev_start(struct rte_eth_dev *dev)
if (mrvl_qos_cfg) {
ret = mrvl_start_qos_mapping(priv);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n");
+ MRVL_LOG(ERR, "Failed to setup QoS mapping");
goto out;
}
}
ret = mrvl_dev_set_link_up(dev);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to set link up\n");
+ MRVL_LOG(ERR, "Failed to set link up");
goto out;
}
@@ -629,7 +633,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
return 0;
out:
- RTE_LOG(ERR, PMD, "Failed to start device\n");
+ MRVL_LOG(ERR, "Failed to start device");
pp2_ppio_deinit(priv->ppio);
return ret;
}
@@ -645,7 +649,7 @@ mrvl_flush_rx_queues(struct rte_eth_dev *dev)
{
int i;
- RTE_LOG(INFO, PMD, "Flushing rx queues\n");
+ MRVL_LOG(INFO, "Flushing rx queues");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
int ret, num;
@@ -674,7 +678,7 @@ mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
int i, j;
struct mrvl_txq *txq;
- RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
+ MRVL_LOG(INFO, "Flushing tx shadow queues");
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = (struct mrvl_txq *)dev->data->tx_queues[i];
@@ -722,7 +726,7 @@ mrvl_flush_bpool(struct rte_eth_dev *dev)
ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n");
+ MRVL_LOG(ERR, "Failed to get bpool buffers number");
return;
}
@@ -887,7 +891,7 @@ mrvl_promiscuous_enable(struct rte_eth_dev *dev)
ret = pp2_ppio_set_promisc(priv->ppio, 1);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
+ MRVL_LOG(ERR, "Failed to enable promiscuous mode");
}
/**
@@ -910,7 +914,7 @@ mrvl_allmulticast_enable(struct rte_eth_dev *dev)
ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
if (ret)
- RTE_LOG(ERR, PMD, "Failed enable all-multicast mode\n");
+ MRVL_LOG(ERR, "Failed enable all-multicast mode");
}
/**
@@ -930,7 +934,7 @@ mrvl_promiscuous_disable(struct rte_eth_dev *dev)
ret = pp2_ppio_set_promisc(priv->ppio, 0);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
+ MRVL_LOG(ERR, "Failed to disable promiscuous mode");
}
/**
@@ -950,7 +954,7 @@ mrvl_allmulticast_disable(struct rte_eth_dev *dev)
ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
+ MRVL_LOG(ERR, "Failed to disable all-multicast mode");
}
/**
@@ -979,7 +983,7 @@ mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
if (ret) {
ether_format_addr(buf, sizeof(buf),
&dev->data->mac_addrs[index]);
- RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf);
+ MRVL_LOG(ERR, "Failed to remove mac %s", buf);
}
}
@@ -1032,7 +1036,7 @@ mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
if (ret) {
ether_format_addr(buf, sizeof(buf), mac_addr);
- RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf);
+ MRVL_LOG(ERR, "Failed to add mac %s", buf);
return -1;
}
@@ -1066,7 +1070,7 @@ mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
if (ret) {
char buf[ETHER_ADDR_FMT_SIZE];
ether_format_addr(buf, sizeof(buf), mac_addr);
- RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf);
+ MRVL_LOG(ERR, "Failed to set mac to %s", buf);
}
return ret;
@@ -1103,8 +1107,8 @@ mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
idx = rxq->queue_id;
if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
- RTE_LOG(ERR, PMD,
- "rx queue %d stats out of range (0 - %d)\n",
+ MRVL_LOG(ERR,
+ "rx queue %d stats out of range (0 - %d)",
idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
continue;
}
@@ -1114,8 +1118,8 @@ mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
priv->rxq_map[idx].inq,
&rx_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD,
- "Failed to update rx queue %d stats\n", idx);
+ MRVL_LOG(ERR,
+ "Failed to update rx queue %d stats", idx);
break;
}
@@ -1138,16 +1142,16 @@ mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
idx = txq->queue_id;
if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
- RTE_LOG(ERR, PMD,
- "tx queue %d stats out of range (0 - %d)\n",
+ MRVL_LOG(ERR,
+ "tx queue %d stats out of range (0 - %d)",
idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
}
ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
&tx_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD,
- "Failed to update tx queue %d stats\n", idx);
+ MRVL_LOG(ERR,
+ "Failed to update tx queue %d stats", idx);
break;
}
@@ -1158,7 +1162,7 @@ mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD, "Failed to update port statistics\n");
+ MRVL_LOG(ERR, "Failed to update port statistics");
return ret;
}
@@ -1480,8 +1484,8 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
for (i = 0; i < num; i++) {
if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
!= cookie_addr_high) {
- RTE_LOG(ERR, PMD,
- "mbuf virtual addr high 0x%lx out of range\n",
+ MRVL_LOG(ERR,
+ "mbuf virtual addr high 0x%lx out of range",
(uint64_t)mbufs[i] >> 32);
goto out;
}
@@ -1544,7 +1548,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/*
* Unknown TC mapping, mapping will not have a correct queue.
*/
- RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
+ MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
idx, priv->ppio_id);
return -EFAULT;
}
@@ -1552,8 +1556,8 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
MRVL_PKT_EFFEC_OFFS;
if (min_size < max_rx_pkt_len) {
- RTE_LOG(ERR, PMD,
- "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
+ MRVL_LOG(ERR,
+ "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.",
max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
MRVL_PKT_EFFEC_OFFS,
max_rx_pkt_len);
@@ -1612,9 +1616,12 @@ mrvl_rx_queue_release(void *rxq)
if (core_id == LCORE_ID_ANY)
core_id = 0;
+ if (!q)
+ return;
+
hif = mrvl_get_hif(q->priv, core_id);
- if (!q || !hif)
+ if (!hif)
return;
tc = q->priv->rxq_map[q->queue_id].tc;
@@ -1717,7 +1724,7 @@ mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to read rx pause state\n");
+ MRVL_LOG(ERR, "Failed to read rx pause state");
return ret;
}
@@ -1750,7 +1757,7 @@ mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
fc_conf->pause_time ||
fc_conf->mac_ctrl_frame_fwd ||
fc_conf->autoneg) {
- RTE_LOG(ERR, PMD, "Flowctrl parameter is not supported\n");
+ MRVL_LOG(ERR, "Flowctrl parameter is not supported");
return -EINVAL;
}
@@ -1762,8 +1769,8 @@ mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
ret = pp2_ppio_set_rx_pause(priv->ppio, en);
if (ret)
- RTE_LOG(ERR, PMD,
- "Failed to change flowctrl on RX side\n");
+ MRVL_LOG(ERR,
+ "Failed to change flowctrl on RX side");
return ret;
}
@@ -1854,7 +1861,7 @@ mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
*(const void **)arg = &mrvl_flow_ops;
return 0;
default:
- RTE_LOG(WARNING, PMD, "Filter type (%d) not supported",
+ MRVL_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
return -EINVAL;
}
@@ -1951,7 +1958,7 @@ mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
*l4_offset = *l3_offset + MRVL_ARP_LENGTH;
break;
default:
- RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n");
+ MRVL_LOG(DEBUG, "Failed to recognise l3 packet type");
break;
}
@@ -1963,7 +1970,7 @@ mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
packet_type |= RTE_PTYPE_L4_UDP;
break;
default:
- RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n");
+ MRVL_LOG(DEBUG, "Failed to recognise l4 packet type");
break;
}
@@ -2034,7 +2041,7 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
if (unlikely(ret < 0)) {
- RTE_LOG(ERR, PMD, "Failed to receive packets\n");
+ MRVL_LOG(ERR, "Failed to receive packets");
return 0;
}
mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
@@ -2101,15 +2108,15 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
(!rx_done && num < q->priv->bpool_init_size))) {
ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
+ MRVL_LOG(ERR, "Failed to fill bpool");
} else if (unlikely(num > q->priv->bpool_max_size)) {
int i;
int pkt_to_remove = num - q->priv->bpool_init_size;
struct rte_mbuf *mbuf;
struct pp2_buff_inf buff;
- RTE_LOG(DEBUG, PMD,
- "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
+ MRVL_LOG(DEBUG,
+ "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)",
bpool->pp2_id, q->priv->ppio->port_id,
bpool->id, pkt_to_remove, num,
q->priv->bpool_init_size);
@@ -2229,8 +2236,8 @@ mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
for (i = 0; i < nb_done; i++) {
entry = &sq->ent[sq->tail + num];
if (unlikely(!entry->buff.addr)) {
- RTE_LOG(ERR, PMD,
- "Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
+ MRVL_LOG(ERR,
+ "Shadow memory @%d: cookie(%lx), pa(%lx)!",
sq->tail, (u64)entry->buff.cookie,
(u64)entry->buff.addr);
skip_bufs = 1;
@@ -2307,8 +2314,8 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
if (unlikely(nb_pkts > sq_free_size)) {
- RTE_LOG(DEBUG, PMD,
- "No room in shadow queue for %d packets! %d packets will be sent.\n",
+ MRVL_LOG(DEBUG,
+ "No room in shadow queue for %d packets! %d packets will be sent.",
nb_pkts, sq_free_size);
nb_pkts = sq_free_size;
}
@@ -2494,7 +2501,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
rte_zmalloc("mac_addrs",
ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
if (!eth_dev->data->mac_addrs) {
- RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
+ MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
ret = -ENOMEM;
goto out_free_priv;
}
@@ -2633,9 +2640,9 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
*/
if (!mrvl_qos_cfg) {
cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
- RTE_LOG(INFO, PMD, "Parsing config file!\n");
+ MRVL_LOG(INFO, "Parsing config file!");
if (cfgnum > 1) {
- RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
+ MRVL_LOG(ERR, "Cannot handle more than one config file!");
goto out_free_kvlist;
} else if (cfgnum == 1) {
rte_kvargs_process(kvlist, MRVL_CFG_ARG,
@@ -2646,7 +2653,7 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
if (mrvl_dev_num)
goto init_devices;
- RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n");
+ MRVL_LOG(INFO, "Perform MUSDK initializations");
/*
* ret == -EEXIST is correct, it means DMA
* has been already initialized (by another PMD).
@@ -2656,13 +2663,13 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
if (ret != -EEXIST)
goto out_free_kvlist;
else
- RTE_LOG(INFO, PMD,
- "DMA memory has been already initialized by a different driver.\n");
+ MRVL_LOG(INFO,
+ "DMA memory has been already initialized by a different driver.");
}
ret = mrvl_init_pp2();
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to init PP!\n");
+ MRVL_LOG(ERR, "Failed to init PP!");
goto out_deinit_dma;
}
@@ -2674,7 +2681,7 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
init_devices:
for (i = 0; i < ifnum; i++) {
- RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]);
+ MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
if (ret)
goto out_cleanup;
@@ -2718,7 +2725,7 @@ rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
if (!name)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Removing %s\n", name);
+ MRVL_LOG(INFO, "Removing %s", name);
RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
char ifname[RTE_ETH_NAME_MAX_LEN];
@@ -2729,7 +2736,7 @@ rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
}
if (mrvl_dev_num == 0) {
- RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n");
+ MRVL_LOG(INFO, "Perform MUSDK deinit");
mrvl_deinit_hifs();
mrvl_deinit_pp2();
mv_sys_dma_mem_destroy();
@@ -2745,3 +2752,10 @@ static struct rte_vdev_driver pmd_mrvl_drv = {
RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
+
+RTE_INIT(mrvl_init_log)
+{
+ mrvl_logtype = rte_log_register("pmd.net.mvpp2");
+ if (mrvl_logtype >= 0)
+ rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE);
+}
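
Note: with the dynamic log type registered above, the driver's verbosity can be changed at run time instead of rebuilding with debug flags. A hedged usage sketch (the EAL option spelling below is the usual one for this era; numeric level 8 corresponds to debug):

    /* From application code: */
    rte_log_set_level(mrvl_logtype, RTE_LOG_DEBUG);

or on the command line, e.g. with testpmd:

    ./testpmd --log-level=pmd.net.mvpp2:8 -- -i
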
diff --git a/drivers/net/mvpp2/mrvl_ethdev.h b/drivers/net/mvpp2/mrvl_ethdev.h
index 3a428092..3726f788 100644
--- a/drivers/net/mvpp2/mrvl_ethdev.h
+++ b/drivers/net/mvpp2/mrvl_ethdev.h
@@ -98,4 +98,12 @@ struct mrvl_priv {
/** Flow operations forward declaration. */
extern const struct rte_flow_ops mrvl_flow_ops;
+
+/** Current log type. */
+extern int mrvl_logtype;
+
+#define MRVL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, mrvl_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
#endif /* _MRVL_ETHDEV_H_ */
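
Note: the macro relies on the GNU ", ## args" extension, which drops the trailing comma when no variadic arguments are passed, so both call forms below compile. A short sketch of the expansion (illustrative only):

    MRVL_LOG(INFO, "link is up");                 /* no extra args */
    MRVL_LOG(ERR, "txq %u failed: %d", q, ret);   /* with extra args */

    /* The first call expands roughly to:
     * rte_log(RTE_LOG_INFO, mrvl_logtype, "%s(): link is up\n", __func__);
     */
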
diff --git a/drivers/net/mvpp2/mrvl_flow.c b/drivers/net/mvpp2/mrvl_flow.c
index 437c987c..ecc34192 100644
--- a/drivers/net/mvpp2/mrvl_flow.c
+++ b/drivers/net/mvpp2/mrvl_flow.c
@@ -1054,7 +1054,7 @@ mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
}
if (mask->type) {
- RTE_LOG(WARNING, PMD, "eth type mask is ignored\n");
+ MRVL_LOG(WARNING, "eth type mask is ignored");
ret = mrvl_parse_type(spec, mask, flow);
if (ret)
goto out;
@@ -1093,14 +1093,14 @@ mrvl_parse_vlan(const struct rte_flow_item *item,
m = rte_be_to_cpu_16(mask->tci);
if (m & MRVL_VLAN_ID_MASK) {
- RTE_LOG(WARNING, PMD, "vlan id mask is ignored\n");
+ MRVL_LOG(WARNING, "vlan id mask is ignored");
ret = mrvl_parse_vlan_id(spec, mask, flow);
if (ret)
goto out;
}
if (m & MRVL_VLAN_PRI_MASK) {
- RTE_LOG(WARNING, PMD, "vlan pri mask is ignored\n");
+ MRVL_LOG(WARNING, "vlan pri mask is ignored");
ret = mrvl_parse_vlan_pri(spec, mask, flow);
if (ret)
goto out;
@@ -1109,7 +1109,7 @@ mrvl_parse_vlan(const struct rte_flow_item *item,
if (flow->pattern & F_TYPE) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "VLAN TPID matching is not supported\n");
+ "VLAN TPID matching is not supported");
return -rte_errno;
}
if (mask->inner_type) {
@@ -1120,7 +1120,7 @@ mrvl_parse_vlan(const struct rte_flow_item *item,
.type = mask->inner_type,
};
- RTE_LOG(WARNING, PMD, "inner eth type mask is ignored\n");
+ MRVL_LOG(WARNING, "inner eth type mask is ignored");
ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
if (ret)
goto out;
@@ -1186,7 +1186,7 @@ mrvl_parse_ip4(const struct rte_flow_item *item,
}
if (mask->hdr.next_proto_id) {
- RTE_LOG(WARNING, PMD, "next proto id mask is ignored\n");
+ MRVL_LOG(WARNING, "next proto id mask is ignored");
ret = mrvl_parse_ip4_proto(spec, mask, flow);
if (ret)
goto out;
@@ -1257,7 +1257,7 @@ mrvl_parse_ip6(const struct rte_flow_item *item,
}
if (mask->hdr.proto) {
- RTE_LOG(WARNING, PMD, "next header mask is ignored\n");
+ MRVL_LOG(WARNING, "next header mask is ignored");
ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
if (ret)
goto out;
@@ -1306,14 +1306,14 @@ mrvl_parse_tcp(const struct rte_flow_item *item,
}
if (mask->hdr.src_port) {
- RTE_LOG(WARNING, PMD, "tcp sport mask is ignored\n");
+ MRVL_LOG(WARNING, "tcp sport mask is ignored");
ret = mrvl_parse_tcp_sport(spec, mask, flow);
if (ret)
goto out;
}
if (mask->hdr.dst_port) {
- RTE_LOG(WARNING, PMD, "tcp dport mask is ignored\n");
+ MRVL_LOG(WARNING, "tcp dport mask is ignored");
ret = mrvl_parse_tcp_dport(spec, mask, flow);
if (ret)
goto out;
@@ -1357,14 +1357,14 @@ mrvl_parse_udp(const struct rte_flow_item *item,
}
if (mask->hdr.src_port) {
- RTE_LOG(WARNING, PMD, "udp sport mask is ignored\n");
+ MRVL_LOG(WARNING, "udp sport mask is ignored");
ret = mrvl_parse_udp_sport(spec, mask, flow);
if (ret)
goto out;
}
if (mask->hdr.dst_port) {
- RTE_LOG(WARNING, PMD, "udp dport mask is ignored\n");
+ MRVL_LOG(WARNING, "udp dport mask is ignored");
ret = mrvl_parse_udp_dport(spec, mask, flow);
if (ret)
goto out;
@@ -2280,8 +2280,8 @@ mrvl_flow_parse_actions(struct mrvl_priv *priv,
* Unknown TC mapping, mapping will not have
* a correct queue.
*/
- RTE_LOG(ERR, PMD,
- "Unknown TC mapping for queue %hu eth%hhu\n",
+ MRVL_LOG(ERR,
+ "Unknown TC mapping for queue %hu eth%hhu",
q->index, priv->ppio_id);
rte_flow_error_set(error, EFAULT,
@@ -2290,8 +2290,8 @@ mrvl_flow_parse_actions(struct mrvl_priv *priv,
return -rte_errno;
}
- RTE_LOG(DEBUG, PMD,
- "Action: Assign packets to queue %d, tc:%d, q:%d\n",
+ MRVL_LOG(DEBUG,
+ "Action: Assign packets to queue %d, tc:%d, q:%d",
q->index, priv->rxq_map[q->index].tc,
priv->rxq_map[q->index].inq);
@@ -2384,7 +2384,7 @@ mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
- RTE_LOG(INFO, PMD, "Setting cls search engine type to %s\n",
+ MRVL_LOG(INFO, "Setting cls search engine type to %s",
priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
"exact" : "maskable");
priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
diff --git a/drivers/net/mvpp2/mrvl_qos.c b/drivers/net/mvpp2/mrvl_qos.c
index 70d000ca..71856c1a 100644
--- a/drivers/net/mvpp2/mrvl_qos.c
+++ b/drivers/net/mvpp2/mrvl_qos.c
@@ -138,7 +138,7 @@ get_outq_cfg(struct rte_cfgfile *file, int port, int outq,
cfg->port[port].outq[outq].sched_mode =
PP2_PPIO_SCHED_M_WRR;
} else {
- RTE_LOG(ERR, PMD, "Unknown token: %s\n", entry);
+ MRVL_LOG(ERR, "Unknown token: %s", entry);
return -1;
}
}
@@ -159,7 +159,7 @@ get_outq_cfg(struct rte_cfgfile *file, int port, int outq,
* global port rate limiting has priority.
*/
if (cfg->port[port].rate_limit_enable) {
- RTE_LOG(WARNING, PMD, "Port %d rate limiting already enabled\n",
+ MRVL_LOG(WARNING, "Port %d rate limiting already enabled",
port);
return 0;
}
@@ -340,7 +340,7 @@ parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
RTE_DIM(cfg->port[port].tc[tc].inq),
MRVL_PP2_RXQ_MAX);
if (n < 0) {
- RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
n, entry);
return n;
}
@@ -355,7 +355,7 @@ parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
RTE_DIM(cfg->port[port].tc[tc].pcp),
MAX_PCP);
if (n < 0) {
- RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
n, entry);
return n;
}
@@ -370,7 +370,7 @@ parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
RTE_DIM(cfg->port[port].tc[tc].dscp),
MAX_DSCP);
if (n < 0) {
- RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
n, entry);
return n;
}
@@ -390,7 +390,7 @@ parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_RED))) {
cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_RED;
} else {
- RTE_LOG(ERR, PMD, "Error while parsing: %s\n", entry);
+ MRVL_LOG(ERR, "Error while parsing: %s", entry);
return -1;
}
}
@@ -435,7 +435,7 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
if (n == 0) {
/* This is weird, but not bad. */
- RTE_LOG(WARNING, PMD, "Empty configuration file?\n");
+ MRVL_LOG(WARNING, "Empty configuration file?");
return 0;
}
@@ -461,8 +461,8 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
return -1;
(*cfg)->port[n].default_tc = (uint8_t)val;
} else {
- RTE_LOG(ERR, PMD,
- "Default Traffic Class required in custom configuration!\n");
+ MRVL_LOG(ERR,
+ "Default Traffic Class required in custom configuration!");
return -1;
}
@@ -489,7 +489,7 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
sizeof(MRVL_TOK_PLCR_UNIT_PACKETS))) {
unit = PP2_CLS_PLCR_PACKETS_TOKEN_UNIT;
} else {
- RTE_LOG(ERR, PMD, "Unknown token: %s\n",
+ MRVL_LOG(ERR, "Unknown token: %s",
entry);
return -1;
}
@@ -511,8 +511,8 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
sizeof(MRVL_TOK_PLCR_COLOR_AWARE))) {
mode = PP2_CLS_PLCR_COLOR_AWARE_MODE;
} else {
- RTE_LOG(ERR, PMD,
- "Error in parsing: %s\n",
+ MRVL_LOG(ERR,
+ "Error in parsing: %s",
entry);
return -1;
}
@@ -682,7 +682,7 @@ setup_policer(struct mrvl_priv *priv, struct pp2_cls_plcr_params *params)
ret = pp2_cls_plcr_init(params, &priv->policer);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to setup %s\n", match);
+ MRVL_LOG(ERR, "Failed to setup %s", match);
return -1;
}
@@ -742,8 +742,8 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
if (port_cfg->tc[tc].pcps > RTE_DIM(port_cfg->tc[0].pcp)) {
/* Better safe than sorry. */
- RTE_LOG(ERR, PMD,
- "Too many PCPs configured in TC %zu!\n", tc);
+ MRVL_LOG(ERR,
+ "Too many PCPs configured in TC %zu!", tc);
return -1;
}
for (i = 0; i < port_cfg->tc[tc].pcps; ++i) {
@@ -764,8 +764,8 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
if (port_cfg->tc[tc].dscps > RTE_DIM(port_cfg->tc[0].dscp)) {
/* Better safe than sorry. */
- RTE_LOG(ERR, PMD,
- "Too many DSCPs configured in TC %zu!\n", tc);
+ MRVL_LOG(ERR,
+ "Too many DSCPs configured in TC %zu!", tc);
return -1;
}
for (i = 0; i < port_cfg->tc[tc].dscps; ++i) {
@@ -786,8 +786,8 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
if (port_cfg->tc[tc].inqs > RTE_DIM(port_cfg->tc[0].inq)) {
/* Overflow. */
- RTE_LOG(ERR, PMD,
- "Too many RX queues configured per TC %zu!\n",
+ MRVL_LOG(ERR,
+ "Too many RX queues configured per TC %zu!",
tc);
return -1;
}
@@ -795,7 +795,7 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
uint8_t idx = port_cfg->tc[tc].inq[i];
if (idx > RTE_DIM(priv->rxq_map)) {
- RTE_LOG(ERR, PMD, "Bad queue index %d!\n", idx);
+ MRVL_LOG(ERR, "Bad queue index %d!", idx);
return -1;
}
@@ -878,7 +878,7 @@ mrvl_start_qos_mapping(struct mrvl_priv *priv)
size_t i;
if (priv->ppio == NULL) {
- RTE_LOG(ERR, PMD, "ppio must not be NULL here!\n");
+ MRVL_LOG(ERR, "ppio must not be NULL here!");
return -1;
}
diff --git a/drivers/net/netvsc/Makefile b/drivers/net/netvsc/Makefile
new file mode 100644
index 00000000..3c713af3
--- /dev/null
+++ b/drivers/net/netvsc/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_pmd_netvsc.a
+
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+EXPORT_MAP := rte_pmd_netvsc_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rndis.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_nvs.c
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vmbus
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
new file mode 100644
index 00000000..78b842ba
--- /dev/null
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -0,0 +1,761 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Microsoft Corporation
+ * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_cycles.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus_vmbus.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_rndis.h"
+#include "hn_nvs.h"
+#include "ndis.h"
+
+#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_MULTI_SEGS | \
+ DEV_TX_OFFLOAD_VLAN_INSERT)
+
+#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
+ DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_CRC_STRIP)
+
+int hn_logtype_init;
+int hn_logtype_driver;
+
+struct hn_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+static const struct hn_xstats_name_off hn_stat_strings[] = {
+ { "good_packets", offsetof(struct hn_stats, packets) },
+ { "good_bytes", offsetof(struct hn_stats, bytes) },
+ { "errors", offsetof(struct hn_stats, errors) },
+ { "allocation_failed", offsetof(struct hn_stats, nomemory) },
+ { "multicast_packets", offsetof(struct hn_stats, multicast) },
+ { "broadcast_packets", offsetof(struct hn_stats, broadcast) },
+ { "undersize_packets", offsetof(struct hn_stats, size_bins[0]) },
+ { "size_64_packets", offsetof(struct hn_stats, size_bins[1]) },
+ { "size_65_127_packets", offsetof(struct hn_stats, size_bins[2]) },
+ { "size_128_255_packets", offsetof(struct hn_stats, size_bins[3]) },
+ { "size_256_511_packets", offsetof(struct hn_stats, size_bins[4]) },
+ { "size_512_1023_packets", offsetof(struct hn_stats, size_bins[5]) },
+ { "size_1024_1518_packets", offsetof(struct hn_stats, size_bins[6]) },
+ { "size_1519_max_packets", offsetof(struct hn_stats, size_bins[7]) },
+};
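
Note: pairing each xstat name with a byte offset into struct hn_stats lets one generic loop serve every counter. A minimal sketch of the lookup, assuming (as the table implies) that struct hn_stats, declared in hn_var.h, holds only uint64_t counters:

    /* Fetch one named counter from a stats block via its recorded
     * offset; mirrors the pointer arithmetic in hn_dev_xstats_get(). */
    static uint64_t
    hn_stat_value(const struct hn_stats *s,
                  const struct hn_xstats_name_off *e)
    {
        const char *base = (const char *)s;

        return *(const uint64_t *)(base + e->offset);
    }
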
+
+static struct rte_eth_dev *
+eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
+{
+ struct rte_eth_dev *eth_dev;
+ const char *name;
+
+ if (!dev)
+ return NULL;
+
+ name = dev->device.name;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(NOTICE, "can not allocate rte ethdev");
+ return NULL;
+ }
+
+ if (private_data_size) {
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, private_data_size,
+ RTE_CACHE_LINE_SIZE, dev->device.numa_node);
+ if (!eth_dev->data->dev_private) {
+ PMD_DRV_LOG(NOTICE, "can not allocate driver data");
+ rte_eth_dev_release_port(eth_dev);
+ return NULL;
+ }
+ }
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(NOTICE, "can not attach secondary");
+ return NULL;
+ }
+ }
+
+ eth_dev->device = &dev->device;
+ eth_dev->intr_handle = &dev->intr_handle;
+
+ return eth_dev;
+}
+
+static void
+eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
+{
+ /* free ether device */
+ rte_eth_dev_release_port(eth_dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+
+ eth_dev->data->dev_private = NULL;
+
+ /*
+ * Secondary process will check the name to attach.
+ * Clear this field to avoid attaching a released port.
+ */
+ eth_dev->data->name[0] = '\0';
+
+ eth_dev->device = NULL;
+ eth_dev->intr_handle = NULL;
+}
+
+/* Update link status.
+ * Note: the DPDK definition of "wait_to_complete"
+ * means block this call until the link is up,
+ * which is not worth supporting.
+ */
+static int
+hn_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_link link, old;
+ int error;
+
+ old = dev->data->dev_link;
+
+ error = hn_rndis_get_linkstatus(hv);
+ if (error)
+ return error;
+
+ hn_rndis_get_linkspeed(hv);
+
+ link = (struct rte_eth_link) {
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_autoneg = ETH_LINK_FIXED,
+ .link_speed = hv->link_speed / 10000,
+ };
+
+ if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
+ link.link_status = ETH_LINK_UP;
+ else
+ link.link_status = ETH_LINK_DOWN;
+
+ if (old.link_status == link.link_status)
+ return 0;
+
+ PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
+ (link.link_status == ETH_LINK_UP) ? "up" : "down");
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static void hn_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_10G;
+ dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_pktlen = HN_MAX_XFER_LEN;
+ dev_info->max_mac_addrs = 1;
+
+ dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
+ dev_info->flow_type_rss_offloads =
+ ETH_RSS_IPV4 | ETH_RSS_IPV6 | ETH_RSS_TCP | ETH_RSS_UDP;
+
+ dev_info->max_rx_queues = hv->max_queues;
+ dev_info->max_tx_queues = hv->max_queues;
+
+ hn_rndis_get_offload(hv, dev_info);
+}
+
+static void
+hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);
+}
+
+static void
+hn_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ uint32_t filter;
+
+ filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST;
+ if (dev->data->all_multicast)
+ filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
+ hn_rndis_set_rxfilter(hv, filter);
+}
+
+static void
+hn_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_BROADCAST);
+}
+
+static void
+hn_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
+ NDIS_PACKET_TYPE_BROADCAST);
+}
+
+/* Setup shared rx/tx queue data */
+static int hn_subchan_configure(struct hn_data *hv,
+ uint32_t subchan)
+{
+ struct vmbus_channel *primary = hn_primary_chan(hv);
+ int err;
+ unsigned int retry = 0;
+
+ PMD_DRV_LOG(DEBUG,
+ "open %u subchannels", subchan);
+
+ /* Send create sub channels command */
+ err = hn_nvs_alloc_subchans(hv, &subchan);
+ if (err)
+ return err;
+
+ while (subchan > 0) {
+ struct vmbus_channel *new_sc;
+ uint16_t chn_index;
+
+ err = rte_vmbus_subchan_open(primary, &new_sc);
+ if (err == -ENOENT && ++retry < 1000) {
+ /* This can happen if not ready yet */
+ rte_delay_ms(10);
+ continue;
+ }
+
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "open subchannel failed: %d", err);
+ return err;
+ }
+
+ retry = 0;
+ chn_index = rte_vmbus_sub_channel_index(new_sc);
+ if (chn_index == 0 || chn_index > hv->max_queues) {
+ PMD_DRV_LOG(ERR,
+ "Invalid subchannel offermsg channel %u",
+ chn_index);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(DEBUG, "new sub channel %u", chn_index);
+ hv->channels[chn_index] = new_sc;
+ --subchan;
+ }
+
+ return err;
+}
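
Note: rte_vmbus_subchan_open() can transiently fail with -ENOENT while the host is still offering channels, hence the bounded retry with a short delay. The idiom in isolation (bounds copied from the loop above; the -ETIMEDOUT fallback is an assumption, the driver keeps counting within its main loop instead):

    #include <errno.h>
    #include <rte_cycles.h>
    #include <rte_bus_vmbus.h>

    static int
    subchan_open_retry(struct vmbus_channel *primary,
                       struct vmbus_channel **sc)
    {
        unsigned int retry;
        int err;

        for (retry = 0; retry < 1000; retry++) {
            err = rte_vmbus_subchan_open(primary, sc);
            if (err != -ENOENT)
                return err;     /* success, or a hard failure */
            rte_delay_ms(10);   /* host not ready yet; try again */
        }
        return -ETIMEDOUT;      /* assumed give-up value */
    }
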
+
+static int hn_dev_configure(struct rte_eth_dev *dev)
+{
+ const struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
+ const struct rte_eth_txmode *txmode = &dev_conf->txmode;
+
+ const struct rte_eth_rss_conf *rss_conf =
+ &dev_conf->rx_adv_conf.rss_conf;
+ struct hn_data *hv = dev->data->dev_private;
+ uint64_t unsupported;
+ int err, subchan;
+
+ PMD_INIT_FUNC_TRACE();
+
+ unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
+ if (unsupported) {
+ PMD_DRV_LOG(NOTICE,
+ "unsupported TX offload: %#" PRIx64,
+ unsupported);
+ return -EINVAL;
+ }
+
+ unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS;
+ if (unsupported) {
+ PMD_DRV_LOG(NOTICE,
+ "unsupported RX offload: %#" PRIx64,
+ unsupported);
+ return -EINVAL;
+ }
+
+ err = hn_rndis_conf_offload(hv, txmode->offloads,
+ rxmode->offloads);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "offload configure failed");
+ return err;
+ }
+
+ hv->num_queues = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+ subchan = hv->num_queues - 1;
+ if (subchan > 0) {
+ err = hn_subchan_configure(hv, subchan);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "subchannel configuration failed");
+ return err;
+ }
+
+ err = hn_rndis_conf_rss(hv, rss_conf);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "rss configuration failed");
+ return err;
+ }
+ }
+
+ return 0;
+}
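
Note: the configure path isolates requested-but-unsupported offload bits with "requested & ~caps", a common ethdev validation idiom. The check in isolation (a sketch, not driver code):

    #include <errno.h>
    #include <stdint.h>

    /* Return 0 if every requested bit is within the capability mask,
     * -EINVAL otherwise; the caller logs the stray bits. */
    static int
    check_offloads(uint64_t requested, uint64_t caps)
    {
        uint64_t unsupported = requested & ~caps;

        return unsupported ? -EINVAL : 0;
    }
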
+
+static int hn_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ const struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ stats->opackets += txq->stats.packets;
+ stats->obytes += txq->stats.bytes;
+ stats->oerrors += txq->stats.errors + txq->stats.nomemory;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = txq->stats.packets;
+ stats->q_obytes[i] = txq->stats.bytes;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ const struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ stats->ipackets += rxq->stats.packets;
+ stats->ibytes += rxq->stats.bytes;
+ stats->ierrors += rxq->stats.errors;
+ stats->imissed += rxq->ring_full;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = rxq->stats.packets;
+ stats->q_ibytes[i] = rxq->stats.bytes;
+ }
+ }
+
+ stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+ return 0;
+}
+
+static void
+hn_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+ memset(&txq->stats, 0, sizeof(struct hn_stats));
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ memset(&rxq->stats, 0, sizeof(struct hn_stats));
+ rxq->ring_full = 0;
+ }
+}
+
+static int
+hn_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ unsigned int i, t, count = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!xstats_names)
+ return dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
+ + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+
+ /* Note: limit checked in rte_eth_xstats_names() */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ const struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ snprintf(xstats_names[count++].name,
+ RTE_ETH_XSTATS_NAME_SIZE,
+ "tx_q%u_%s", i, hn_stat_strings[t].name);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ const struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ snprintf(xstats_names[count++].name,
+ RTE_ETH_XSTATS_NAME_SIZE,
+ "rx_q%u_%s", i,
+ hn_stat_strings[t].name);
+ }
+
+ return count;
+}
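
Note: returning the count when xstats_names is NULL supports the usual two-pass caller: ask for the size, allocate, then fetch. A hedged application-side sketch (port_id and the missing error handling are illustrative):

    #include <stdlib.h>

    struct rte_eth_xstat_name *names;
    int n = rte_eth_xstats_get_names(port_id, NULL, 0);

    if (n > 0) {
        names = malloc(n * sizeof(*names));
        if (names != NULL)
            n = rte_eth_xstats_get_names(port_id, names, n);
    }
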
+
+static int
+hn_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i, t, count = 0;
+
+ const unsigned int nstats =
+ dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
+ + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+ const char *stats;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (n < nstats)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ const struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ stats = (const char *)&txq->stats;
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ xstats[count++].value = *(const uint64_t *)
+ (stats + hn_stat_strings[t].offset);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ const struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ stats = (const char *)&rxq->stats;
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ xstats[count++].value = *(const uint64_t *)
+ (stats + hn_stat_strings[t].offset);
+ }
+
+ return count;
+}
+
+static int
+hn_dev_start(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* check if lsc interrupt feature is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ PMD_DRV_LOG(ERR, "link status not supported yet");
+ return -ENOTSUP;
+ }
+
+ return hn_rndis_set_rxfilter(hv,
+ NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_DIRECTED);
+}
+
+static void
+hn_dev_stop(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hn_rndis_set_rxfilter(hv, 0);
+}
+
+static void
+hn_dev_close(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_INIT_LOG(DEBUG, "close");
+}
+
+static const struct eth_dev_ops hn_eth_dev_ops = {
+ .dev_configure = hn_dev_configure,
+ .dev_start = hn_dev_start,
+ .dev_stop = hn_dev_stop,
+ .dev_close = hn_dev_close,
+ .dev_infos_get = hn_dev_info_get,
+ .txq_info_get = hn_dev_tx_queue_info,
+ .rxq_info_get = hn_dev_rx_queue_info,
+ .promiscuous_enable = hn_dev_promiscuous_enable,
+ .promiscuous_disable = hn_dev_promiscuous_disable,
+ .allmulticast_enable = hn_dev_allmulticast_enable,
+ .allmulticast_disable = hn_dev_allmulticast_disable,
+ .tx_queue_setup = hn_dev_tx_queue_setup,
+ .tx_queue_release = hn_dev_tx_queue_release,
+ .rx_queue_setup = hn_dev_rx_queue_setup,
+ .rx_queue_release = hn_dev_rx_queue_release,
+ .link_update = hn_dev_link_update,
+ .stats_get = hn_dev_stats_get,
+ .xstats_get = hn_dev_xstats_get,
+ .xstats_get_names = hn_dev_xstats_get_names,
+ .stats_reset = hn_dev_stats_reset,
+ .xstats_reset = hn_dev_stats_reset,
+};
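
Note: the ethdev layer dispatches generic API calls through this ops table; an application's rte_eth_dev_configure() call, for instance, lands in hn_dev_configure() above. A hedged usage sketch (port and queue counts are illustrative):

    uint16_t port_id = 0;                  /* illustrative */
    struct rte_eth_conf conf = { 0 };
    int ret;

    /* One Rx and one Tx queue; ethdev routes this to .dev_configure. */
    ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
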
+
+/*
+ * Setup connection between PMD and kernel.
+ */
+static int
+hn_attach(struct hn_data *hv, unsigned int mtu)
+{
+ int error;
+
+ /* Attach NVS */
+ error = hn_nvs_attach(hv, mtu);
+ if (error)
+ goto failed_nvs;
+
+ /* Attach RNDIS */
+ error = hn_rndis_attach(hv);
+ if (error)
+ goto failed_rndis;
+
+ /*
+ * NOTE:
+ * Under certain conditions on certain versions of Hyper-V,
+ * the RNDIS rxfilter is _not_ zero on the hypervisor side
+ * after the successful RNDIS initialization.
+ */
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_NONE);
+ return 0;
+failed_rndis:
+ hn_nvs_detach(hv);
+failed_nvs:
+ return error;
+}
+
+static void
+hn_detach(struct hn_data *hv)
+{
+ hn_nvs_detach(hv);
+ hn_rndis_detach(hv);
+}
+
+static int
+eth_hn_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct hn_data *hv = eth_dev->data->dev_private;
+ struct rte_device *device = eth_dev->device;
+ struct rte_vmbus_device *vmbus;
+ unsigned int rxr_cnt;
+ int err, max_chan;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vmbus = container_of(device, struct rte_vmbus_device, device);
+ eth_dev->dev_ops = &hn_eth_dev_ops;
+ eth_dev->tx_pkt_burst = &hn_xmit_pkts;
+ eth_dev->rx_pkt_burst = &hn_recv_pkts;
+
+ /*
+ * for secondary processes, we don't initialize any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Since Hyper-V only supports one MAC address, just use local data */
+ eth_dev->data->mac_addrs = &hv->mac_addr;
+
+ hv->vmbus = vmbus;
+ hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
+ hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
+ hv->port_id = eth_dev->data->port_id;
+
+ /* Initialize primary channel input for control operations */
+ err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);
+ if (err)
+ return err;
+
+ hv->primary = hn_rx_queue_alloc(hv, 0,
+ eth_dev->device->numa_node);
+
+ if (!hv->primary)
+ return -ENOMEM;
+
+ err = hn_attach(hv, ETHER_MTU);
+ if (err)
+ goto failed;
+
+ err = hn_tx_pool_init(eth_dev);
+ if (err)
+ goto failed;
+
+ err = hn_rndis_get_eaddr(hv, hv->mac_addr.addr_bytes);
+ if (err)
+ goto failed;
+
+ max_chan = rte_vmbus_max_channels(vmbus);
+ PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan);
+ if (max_chan <= 0)
+ goto failed;
+
+ if (hn_rndis_query_rsscaps(hv, &rxr_cnt) != 0)
+ rxr_cnt = 1;
+
+ hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);
+
+ return 0;
+
+failed:
+ PMD_INIT_LOG(NOTICE, "device init failed");
+
+ hn_detach(hv);
+ return err;
+}
+
+static int
+eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct hn_data *hv = eth_dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ hn_dev_stop(eth_dev);
+ hn_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+
+ hn_detach(hv);
+ rte_vmbus_chan_close(hv->primary->chan);
+ rte_free(hv->primary);
+
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
+ struct rte_vmbus_device *dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev = eth_dev_vmbus_allocate(dev, sizeof(struct hn_data));
+ if (!eth_dev)
+ return -ENOMEM;
+
+ ret = eth_hn_dev_init(eth_dev);
+ if (ret)
+ eth_dev_vmbus_release(eth_dev);
+ else
+ rte_eth_dev_probing_finish(eth_dev);
+
+ return ret;
+}
+
+static int eth_hn_remove(struct rte_vmbus_device *dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev = rte_eth_dev_allocated(dev->device.name);
+ if (!eth_dev)
+ return -ENODEV;
+
+ ret = eth_hn_dev_uninit(eth_dev);
+ if (ret)
+ return ret;
+
+ eth_dev_vmbus_release(eth_dev);
+ return 0;
+}
+
+/* Network device GUID */
+static const rte_uuid_t hn_net_ids[] = {
+ /* f8615163-df3e-46c5-913f-f2d2f965ed0e */
+ RTE_UUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x913f, 0xf2d2f965ed0eULL),
+ { 0 }
+};
+
+static struct rte_vmbus_driver rte_netvsc_pmd = {
+ .id_table = hn_net_ids,
+ .probe = eth_hn_probe,
+ .remove = eth_hn_remove,
+};
+
+RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");
+
+RTE_INIT(hn_init_log);
+static void
+hn_init_log(void)
+{
+ hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
+ if (hn_logtype_init >= 0)
+ rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE);
+ hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver");
+ if (hn_logtype_driver >= 0)
+ rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/netvsc/hn_logs.h b/drivers/net/netvsc/hn_logs.h
new file mode 100644
index 00000000..cddadef0
--- /dev/null
+++ b/drivers/net/netvsc/hn_logs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef _HN_LOGS_H_
+#define _HN_LOGS_H_
+
+#include <rte_log.h>
+
+extern int hn_logtype_init;
+extern int hn_logtype_driver;
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_init, "%s(): " fmt "\n",\
+ __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_driver, \
+ "%s() rx: " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_driver, \
+ "%s() tx: " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_driver, "%s(): " fmt "\n", \
+ __func__, ## args)
+
+#endif /* _HN_LOGS_H_ */
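
Note: because PMD_RX_LOG/PMD_TX_LOG expand to empty statements unless the matching RTE_LIBRTE_NETVSC_DEBUG_* config flag is set, data-path logging costs nothing in normal builds. A sketch of a call site (nb_rx is illustrative):

    /* Compiles to nothing without RTE_LIBRTE_NETVSC_DEBUG_RX; with it,
     * logs through hn_logtype_driver at DEBUG level. */
    PMD_RX_LOG(DEBUG, "received %u packets", nb_rx);
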
diff --git a/drivers/net/netvsc/hn_nvs.c b/drivers/net/netvsc/hn_nvs.c
new file mode 100644
index 00000000..77d3b839
--- /dev/null
+++ b/drivers/net/netvsc/hn_nvs.c
@@ -0,0 +1,546 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * Copyright (c) 2010-2012 Citrix Inc.
+ * Copyright (c) 2012 NetApp Inc.
+ * All rights reserved.
+ */
+
+/*
+ * Network Virtualization Service.
+ */
+
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus_vmbus.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_nvs.h"
+
+static const uint32_t hn_nvs_version[] = {
+ NVS_VERSION_61,
+ NVS_VERSION_6,
+ NVS_VERSION_5,
+ NVS_VERSION_4,
+ NVS_VERSION_2,
+ NVS_VERSION_1
+};
+
+static int hn_nvs_req_send(struct hn_data *hv,
+ void *req, uint32_t reqlen)
+{
+ return rte_vmbus_chan_send(hn_primary_chan(hv),
+ VMBUS_CHANPKT_TYPE_INBAND,
+ req, reqlen, 0,
+ VMBUS_CHANPKT_FLAG_NONE, NULL);
+}
+
+static int
+hn_nvs_execute(struct hn_data *hv,
+ void *req, uint32_t reqlen,
+ void *resp, uint32_t resplen,
+ uint32_t type)
+{
+ struct vmbus_channel *chan = hn_primary_chan(hv);
+ char buffer[NVS_RESPSIZE_MAX];
+ const struct hn_nvs_hdr *hdr;
+ uint32_t len;
+ int ret;
+
+ /* Send request to ring buffer */
+ ret = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
+ req, reqlen, 0,
+ VMBUS_CHANPKT_FLAG_RC, NULL);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "send request failed: %d", ret);
+ return ret;
+ }
+
+ retry:
+ len = sizeof(buffer);
+ ret = rte_vmbus_chan_recv(chan, buffer, &len, NULL);
+ if (ret == -EAGAIN) {
+ rte_delay_us(HN_CHAN_INTERVAL_US);
+ goto retry;
+ }
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "recv response failed: %d", ret);
+ return ret;
+ }
+
+ hdr = (struct hn_nvs_hdr *)buffer;
+ if (hdr->type != type) {
+ PMD_DRV_LOG(ERR, "unexpected NVS resp %#x, expect %#x",
+ hdr->type, type);
+ return -EINVAL;
+ }
+
+ if (len < resplen) {
+ PMD_DRV_LOG(ERR,
+ "invalid NVS resp len %u (expect %u)",
+ len, resplen);
+ return -EINVAL;
+ }
+
+ memcpy(resp, buffer, resplen);
+
+ /* All pass! */
+ return 0;
+}
+
+static int
+hn_nvs_doinit(struct hn_data *hv, uint32_t nvs_ver)
+{
+ struct hn_nvs_init init;
+ struct hn_nvs_init_resp resp;
+ uint32_t status;
+ int error;
+
+ memset(&init, 0, sizeof(init));
+ init.type = NVS_TYPE_INIT;
+ init.ver_min = nvs_ver;
+ init.ver_max = nvs_ver;
+
+ error = hn_nvs_execute(hv, &init, sizeof(init),
+ &resp, sizeof(resp),
+ NVS_TYPE_INIT_RESP);
+ if (error)
+ return error;
+
+ status = resp.status;
+ if (status != NVS_STATUS_OK) {
+ /* Not fatal, try other versions */
+ PMD_INIT_LOG(DEBUG, "nvs init failed for ver 0x%x",
+ nvs_ver);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+hn_nvs_conn_rxbuf(struct hn_data *hv)
+{
+ struct hn_nvs_rxbuf_conn conn;
+ struct hn_nvs_rxbuf_connresp resp;
+ uint32_t status;
+ int error;
+
+ /* Kernel has already setup RXBUF on primary channel. */
+
+ /*
+ * Connect RXBUF to NVS.
+ */
+ conn.type = NVS_TYPE_RXBUF_CONN;
+ conn.gpadl = hv->rxbuf_res->phys_addr;
+ conn.sig = NVS_RXBUF_SIG;
+ PMD_DRV_LOG(DEBUG, "connect rxbuff va=%p gpad=%#" PRIx64,
+ hv->rxbuf_res->addr,
+ hv->rxbuf_res->phys_addr);
+
+ error = hn_nvs_execute(hv, &conn, sizeof(conn),
+ &resp, sizeof(resp),
+ NVS_TYPE_RXBUF_CONNRESP);
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "exec nvs rxbuf conn failed: %d",
+ error);
+ return error;
+ }
+
+ status = resp.status;
+ if (status != NVS_STATUS_OK) {
+ PMD_DRV_LOG(ERR,
+ "nvs rxbuf conn failed: %x", status);
+ return -EIO;
+ }
+ if (resp.nsect != 1) {
+ PMD_DRV_LOG(ERR,
+ "nvs rxbuf response num sections %u != 1",
+ resp.nsect);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "receive buffer size %u count %u",
+ resp.nvs_sect[0].slotsz,
+ resp.nvs_sect[0].slotcnt);
+ hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
+
+ hv->rxbuf_info = rte_calloc("HN_RXBUF_INFO", hv->rxbuf_section_cnt,
+ sizeof(*hv->rxbuf_info), RTE_CACHE_LINE_SIZE);
+ if (!hv->rxbuf_info) {
+ PMD_DRV_LOG(ERR,
+ "could not allocate rxbuf info");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+hn_nvs_disconn_rxbuf(struct hn_data *hv)
+{
+ struct hn_nvs_rxbuf_disconn disconn;
+ int error;
+
+ /*
+ * Disconnect RXBUF from NVS.
+ */
+ memset(&disconn, 0, sizeof(disconn));
+ disconn.type = NVS_TYPE_RXBUF_DISCONN;
+ disconn.sig = NVS_RXBUF_SIG;
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "send nvs rxbuf disconn failed: %d",
+ error);
+ }
+
+ rte_free(hv->rxbuf_info);
+ /*
+ * Linger long enough for NVS to disconnect RXBUF.
+ */
+ rte_delay_ms(200);
+}
+
+static void
+hn_nvs_disconn_chim(struct hn_data *hv)
+{
+ int error;
+
+ if (hv->chim_cnt != 0) {
+ struct hn_nvs_chim_disconn disconn;
+
+ /* Disconnect chimney sending buffer from NVS. */
+ memset(&disconn, 0, sizeof(disconn));
+ disconn.type = NVS_TYPE_CHIM_DISCONN;
+ disconn.sig = NVS_CHIM_SIG;
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
+
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "send nvs chim disconn failed: %d", error);
+ }
+
+ hv->chim_cnt = 0;
+ /*
+ * Linger long enough for NVS to disconnect chimney
+ * sending buffer.
+ */
+ rte_delay_ms(200);
+ }
+}
+
+static int
+hn_nvs_conn_chim(struct hn_data *hv)
+{
+ struct hn_nvs_chim_conn chim;
+ struct hn_nvs_chim_connresp resp;
+ uint32_t sectsz;
+ unsigned long len = hv->chim_res->len;
+ int error;
+
+ /* Connect chimney sending buffer to NVS */
+ memset(&chim, 0, sizeof(chim));
+ chim.type = NVS_TYPE_CHIM_CONN;
+ chim.gpadl = hv->chim_res->phys_addr;
+ chim.sig = NVS_CHIM_SIG;
+ PMD_DRV_LOG(DEBUG, "connect send buf va=%p gpad=%#" PRIx64,
+ hv->chim_res->addr,
+ hv->chim_res->phys_addr);
+
+ error = hn_nvs_execute(hv, &chim, sizeof(chim),
+ &resp, sizeof(resp),
+ NVS_TYPE_CHIM_CONNRESP);
+ if (error) {
+ PMD_DRV_LOG(ERR, "exec nvs chim conn failed");
+ goto cleanup;
+ }
+
+ if (resp.status != NVS_STATUS_OK) {
+ PMD_DRV_LOG(ERR, "nvs chim conn failed: %x",
+ resp.status);
+ error = -EIO;
+ goto cleanup;
+ }
+
+ sectsz = resp.sectsz;
+ if (sectsz == 0 || sectsz & (sizeof(uint32_t) - 1)) {
+ /* Can't use chimney sending buffer; done! */
+ PMD_DRV_LOG(NOTICE,
+ "invalid chimney sending buffer section size: %u",
+ sectsz);
+ return 0;
+ }
+
+ hv->chim_szmax = sectsz;
+ hv->chim_cnt = len / sectsz;
+
+ PMD_DRV_LOG(INFO, "send buffer %lu section size:%u, count:%u",
+ len, hv->chim_szmax, hv->chim_cnt);
+
+ if (len % hv->chim_szmax != 0) {
+ PMD_DRV_LOG(NOTICE,
+ "chimney sending sections are not properly aligned");
+ }
+
+ /* Done! */
+ return 0;
+
+cleanup:
+ hn_nvs_disconn_chim(hv);
+ return error;
+}
+
+/*
+ * Configure MTU and enable VLAN.
+ */
+static int
+hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu)
+{
+ struct hn_nvs_ndis_conf conf;
+ int error;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.type = NVS_TYPE_NDIS_CONF;
+ conf.mtu = mtu + ETHER_HDR_LEN;
+ conf.caps = NVS_NDIS_CONF_VLAN;
+
+ /* TODO: enable SRIOV */
+ /* if (hv->nvs_ver >= NVS_VERSION_5) */
+ /*	conf.caps |= NVS_NDIS_CONF_SRIOV; */
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &conf, sizeof(conf));
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "send nvs ndis conf failed: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+hn_nvs_init_ndis(struct hn_data *hv)
+{
+ struct hn_nvs_ndis_init ndis;
+ int error;
+
+ memset(&ndis, 0, sizeof(ndis));
+ ndis.type = NVS_TYPE_NDIS_INIT;
+ ndis.ndis_major = NDIS_VERSION_MAJOR(hv->ndis_ver);
+ ndis.ndis_minor = NDIS_VERSION_MINOR(hv->ndis_ver);
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &ndis, sizeof(ndis));
+ if (error)
+ PMD_DRV_LOG(ERR,
+ "send nvs ndis init failed: %d", error);
+
+ return error;
+}
+
+static int
+hn_nvs_init(struct hn_data *hv)
+{
+ unsigned int i;
+ int error;
+
+ /*
+ * Find the supported NVS version and set NDIS version accordingly.
+ */
+ for (i = 0; i < RTE_DIM(hn_nvs_version); ++i) {
+ error = hn_nvs_doinit(hv, hn_nvs_version[i]);
+ if (error) {
+ PMD_INIT_LOG(DEBUG, "version %#x error %d",
+ hn_nvs_version[i], error);
+ continue;
+ }
+
+ hv->nvs_ver = hn_nvs_version[i];
+
+ /* Set NDIS version according to NVS version. */
+ hv->ndis_ver = NDIS_VERSION_6_30;
+ if (hv->nvs_ver <= NVS_VERSION_4)
+ hv->ndis_ver = NDIS_VERSION_6_1;
+
+ PMD_INIT_LOG(DEBUG,
+ "NVS version %#x, NDIS version %u.%u",
+ hv->nvs_ver, NDIS_VERSION_MAJOR(hv->ndis_ver),
+ NDIS_VERSION_MINOR(hv->ndis_ver));
+ return 0;
+ }
+
+ PMD_DRV_LOG(ERR,
+ "no NVS compatible version available");
+ return -ENXIO;
+}
+
+int
+hn_nvs_attach(struct hn_data *hv, unsigned int mtu)
+{
+ int error;
+
+ /*
+ * Initialize NVS.
+ */
+ error = hn_nvs_init(hv);
+ if (error)
+ return error;
+
+ /* Configure NDIS before initializing it. */
+ if (hv->nvs_ver >= NVS_VERSION_2) {
+ error = hn_nvs_conf_ndis(hv, mtu);
+ if (error)
+ return error;
+ }
+
+ /*
+ * Initialize NDIS.
+ */
+ error = hn_nvs_init_ndis(hv);
+ if (error)
+ return error;
+
+ /*
+ * Connect RXBUF.
+ */
+ error = hn_nvs_conn_rxbuf(hv);
+ if (error)
+ return error;
+
+ /*
+ * Connect chimney sending buffer.
+ */
+ error = hn_nvs_conn_chim(hv);
+ if (error) {
+ hn_nvs_disconn_rxbuf(hv);
+ return error;
+ }
+
+ return 0;
+}
+
+void
+hn_nvs_detach(struct hn_data *hv)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /* NOTE: there are no requests to stop the NVS. */
+ hn_nvs_disconn_rxbuf(hv);
+ hn_nvs_disconn_chim(hv);
+}
+
+/*
+ * Ack the consumed RXBUF associated w/ this channel packet,
+ * so that this RXBUF can be recycled by the hypervisor.
+ */
+void
+hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid)
+{
+ unsigned int retries = 0;
+ struct hn_nvs_rndis_ack ack = {
+ .type = NVS_TYPE_RNDIS_ACK,
+ .status = NVS_STATUS_OK,
+ };
+ int error;
+
+ PMD_RX_LOG(DEBUG, "ack RX id %" PRIu64, tid);
+
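+ /*
+ * Completion packets share the channel ring with data; if the
+ * ring is transiently full (-EAGAIN), retry briefly before
+ * giving up and leaking the RXBUF.
+ */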
+ again:
+ error = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP,
+ &ack, sizeof(ack), tid,
+ VMBUS_CHANPKT_FLAG_NONE, NULL);
+
+ if (error == 0)
+ return;
+
+ if (error == -EAGAIN) {
+ /*
+ * NOTE:
+ * This should _not_ happen in real world, since the
+ * consumption of the TX bufring from the TX path is
+ * controlled.
+ */
+ PMD_RX_LOG(NOTICE, "RXBUF ack retry");
+ if (++retries < 10) {
+ rte_delay_ms(1);
+ goto again;
+ }
+ }
+ /* RXBUF leaks! */
+ PMD_DRV_LOG(ERR, "RXBUF ack failed");
+}
+
+int
+hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch)
+{
+ struct hn_nvs_subch_req req;
+ struct hn_nvs_subch_resp resp;
+ int error;
+
+ memset(&req, 0, sizeof(req));
+ req.type = NVS_TYPE_SUBCH_REQ;
+ req.op = NVS_SUBCH_OP_ALLOC;
+ req.nsubch = *nsubch;
+
+ error = hn_nvs_execute(hv, &req, sizeof(req),
+ &resp, sizeof(resp),
+ NVS_TYPE_SUBCH_RESP);
+ if (error)
+ return error;
+
+ if (resp.status != NVS_STATUS_OK) {
+ PMD_INIT_LOG(ERR,
+ "nvs subch alloc failed: %#x",
+ resp.status);
+ return -EIO;
+ }
+
+ if (resp.nsubch > *nsubch) {
+ PMD_INIT_LOG(NOTICE,
+ "%u subchans are allocated, requested %u",
+ resp.nsubch, *nsubch);
+ }
+ *nsubch = resp.nsubch;
+
+ return 0;
+}
+
+void
+hn_nvs_set_datapath(struct hn_data *hv, uint32_t path)
+{
+ struct hn_nvs_datapath dp;
+ int error;
+
+ memset(&dp, 0, sizeof(dp));
+ dp.type = NVS_TYPE_SET_DATAPATH;
+ dp.active_path = path;
+
+ hn_nvs_req_send(hv, &dp, sizeof(dp));
+}
diff --git a/drivers/net/netvsc/hn_nvs.h b/drivers/net/netvsc/hn_nvs.h
new file mode 100644
index 00000000..984a9c11
--- /dev/null
+++ b/drivers/net/netvsc/hn_nvs.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * All rights reserved.
+ */
+
+/*
+ * The indirection table message is the largest message received
+ * from the host; at 112 bytes, 256 leaves ample headroom.
+ */
+#define NVS_RESPSIZE_MAX 256
+
+/*
+ * NDIS protocol version numbers
+ */
+#define NDIS_VERSION_6_1 0x00060001
+#define NDIS_VERSION_6_20 0x00060014
+#define NDIS_VERSION_6_30 0x0006001e
+#define NDIS_VERSION_MAJOR(ver) (((ver) & 0xffff0000) >> 16)
+#define NDIS_VERSION_MINOR(ver) ((ver) & 0xffff)
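+/* For example, NDIS_VERSION_6_30 (0x0006001e) decodes to major 6, minor 30. */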
+
+/*
+ * NVS versions.
+ */
+#define NVS_VERSION_1 0x00002
+#define NVS_VERSION_2 0x30002
+#define NVS_VERSION_4 0x40000
+#define NVS_VERSION_5 0x50000
+#define NVS_VERSION_6 0x60000
+#define NVS_VERSION_61 0x60001
+
+#define NVS_RXBUF_SIG 0xcafe
+#define NVS_CHIM_SIG 0xface
+
+#define NVS_CHIM_IDX_INVALID 0xffffffff
+
+#define NVS_RNDIS_MTYPE_DATA 0
+#define NVS_RNDIS_MTYPE_CTRL 1
+
+/*
+ * NVS message transaction status codes.
+ */
+#define NVS_STATUS_OK 1
+#define NVS_STATUS_FAILED 2
+
+/*
+ * NVS request/response message types.
+ */
+#define NVS_TYPE_INIT 1
+#define NVS_TYPE_INIT_RESP 2
+
+#define NVS_TYPE_NDIS_INIT 100
+#define NVS_TYPE_RXBUF_CONN 101
+#define NVS_TYPE_RXBUF_CONNRESP 102
+#define NVS_TYPE_RXBUF_DISCONN 103
+#define NVS_TYPE_CHIM_CONN 104
+#define NVS_TYPE_CHIM_CONNRESP 105
+#define NVS_TYPE_CHIM_DISCONN 106
+#define NVS_TYPE_RNDIS 107
+#define NVS_TYPE_RNDIS_ACK 108
+
+#define NVS_TYPE_NDIS_CONF 125
+#define NVS_TYPE_VFASSOC_NOTE 128 /* notification */
+#define NVS_TYPE_SET_DATAPATH 129
+#define NVS_TYPE_SUBCH_REQ 133
+#define NVS_TYPE_SUBCH_RESP 133 /* same as SUBCH_REQ */
+#define NVS_TYPE_TXTBL_NOTE 134 /* notification */
+
+
+/* NVS message common header */
+struct hn_nvs_hdr {
+ uint32_t type;
+} __rte_packed;
+
+struct hn_nvs_init {
+ uint32_t type; /* NVS_TYPE_INIT */
+ uint32_t ver_min;
+ uint32_t ver_max;
+ uint8_t rsvd[28];
+} __rte_packed;
+
+struct hn_nvs_init_resp {
+ uint32_t type; /* NVS_TYPE_INIT_RESP */
+ uint32_t ver; /* deprecated */
+ uint32_t rsvd;
+ uint32_t status; /* NVS_STATUS_ */
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_ndis_conf {
+ uint32_t type; /* NVS_TYPE_NDIS_CONF */
+ uint32_t mtu;
+ uint32_t rsvd;
+ uint64_t caps; /* NVS_NDIS_CONF_ */
+ uint8_t rsvd1[20];
+} __rte_packed;
+
+#define NVS_NDIS_CONF_SRIOV 0x0004
+#define NVS_NDIS_CONF_VLAN 0x0008
+
+/* No response */
+struct hn_nvs_ndis_init {
+ uint32_t type; /* NVS_TYPE_NDIS_INIT */
+ uint32_t ndis_major; /* NDIS_VERSION_MAJOR_ */
+ uint32_t ndis_minor; /* NDIS_VERSION_MINOR_ */
+ uint8_t rsvd[28];
+} __rte_packed;
+
+#define NVS_DATAPATH_SYNTHETIC 0
+#define NVS_DATAPATH_VF 1
+
+/* No response */
+struct hn_nvs_datapath {
+ uint32_t type; /* NVS_TYPE_SET_DATAPATH */
+ uint32_t active_path;/* NVS_DATAPATH_* */
+ uint8_t rsvd[32];
+} __rte_packed;
+
+struct hn_nvs_rxbuf_conn {
+ uint32_t type; /* NVS_TYPE_RXBUF_CONN */
+ uint32_t gpadl; /* RXBUF vmbus GPADL */
+ uint16_t sig; /* NVS_RXBUF_SIG */
+ uint8_t rsvd[30];
+} __rte_packed;
+
+struct hn_nvs_rxbuf_sect {
+ uint32_t start;
+ uint32_t slotsz;
+ uint32_t slotcnt;
+ uint32_t end;
+} __rte_packed;
+
+struct hn_nvs_rxbuf_connresp {
+ uint32_t type; /* NVS_TYPE_RXBUF_CONNRESP */
+ uint32_t status; /* NVS_STATUS_ */
+ uint32_t nsect; /* # of elem in nvs_sect */
+ struct hn_nvs_rxbuf_sect nvs_sect[1];
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_rxbuf_disconn {
+ uint32_t type; /* NVS_TYPE_RXBUF_DISCONN */
+ uint16_t sig; /* NVS_RXBUF_SIG */
+ uint8_t rsvd[34];
+} __rte_packed;
+
+struct hn_nvs_chim_conn {
+ uint32_t type; /* NVS_TYPE_CHIM_CONN */
+ uint32_t gpadl; /* chimney buf vmbus GPADL */
+ uint16_t sig; /* NVS_CHIM_SIG */
+ uint8_t rsvd[30];
+} __rte_packed;
+
+struct hn_nvs_chim_connresp {
+ uint32_t type; /* NVS_TYPE_CHIM_CONNRESP */
+ uint32_t status; /* NVS_STATUS_ */
+ uint32_t sectsz; /* section size */
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_chim_disconn {
+ uint32_t type; /* NVS_TYPE_CHIM_DISCONN */
+ uint16_t sig; /* NVS_CHIM_SIG */
+ uint8_t rsvd[34];
+} __rte_packed;
+
+#define NVS_SUBCH_OP_ALLOC 1
+
+struct hn_nvs_subch_req {
+ uint32_t type; /* NVS_TYPE_SUBCH_REQ */
+ uint32_t op; /* NVS_SUBCH_OP_ */
+ uint32_t nsubch;
+ uint8_t rsvd[28];
+} __rte_packed;
+
+struct hn_nvs_subch_resp {
+ uint32_t type; /* NVS_TYPE_SUBCH_RESP */
+ uint32_t status; /* NVS_STATUS_ */
+ uint32_t nsubch;
+ uint8_t rsvd[28];
+} __rte_packed;
+
+struct hn_nvs_rndis {
+ uint32_t type; /* NVS_TYPE_RNDIS */
+ uint32_t rndis_mtype;/* NVS_RNDIS_MTYPE_ */
+ /*
+ * Chimney sending buffer index and size.
+ *
+ * NOTE:
+ * If nvs_chim_idx is set to NVS_CHIM_IDX_INVALID
+ * and nvs_chim_sz is set to 0, then chimney sending
+ * buffer is _not_ used by this RNDIS message.
+ */
+ uint32_t chim_idx;
+ uint32_t chim_sz;
+ uint8_t rsvd[24];
+} __rte_packed;
+
+struct hn_nvs_rndis_ack {
+ uint32_t type; /* NVS_TYPE_RNDIS_ACK */
+ uint32_t status; /* NVS_STATUS_ */
+ uint8_t rsvd[32];
+} __rte_packed;
+
+
+int hn_nvs_attach(struct hn_data *hv, unsigned int mtu);
+void hn_nvs_detach(struct hn_data *hv);
+void hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid);
+int hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch);
+void hn_nvs_set_datapath(struct hn_data *hv, uint32_t path);
+
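+/*
+ * Thin wrappers over the vmbus channel send primitives. The sndc value
+ * is returned as xactid in the send-completion packet and is used to
+ * carry the transmit descriptor pointer (0 for control requests).
+ */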
+static inline int
+hn_nvs_send(struct vmbus_channel *chan, uint16_t flags,
+ void *nvs_msg, int nvs_msglen, uintptr_t sndc,
+ bool *need_sig)
+{
+ return rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
+ nvs_msg, nvs_msglen, (uint64_t)sndc,
+ flags, need_sig);
+}
+
+static inline int
+hn_nvs_send_sglist(struct vmbus_channel *chan,
+ struct vmbus_gpa sg[], unsigned int sglen,
+ void *nvs_msg, int nvs_msglen,
+ uintptr_t sndc, bool *need_sig)
+{
+ return rte_vmbus_chan_send_sglist(chan, sg, sglen, nvs_msg, nvs_msglen,
+ (uint64_t)sndc, need_sig);
+}
diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c
new file mode 100644
index 00000000..bde33969
--- /dev/null
+++ b/drivers/net/netvsc/hn_rndis.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2009-2018 Microsoft Corp.
+ * Copyright (c) 2010-2012 Citrix Inc.
+ * Copyright (c) 2012 NetApp Inc.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus_vmbus.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_nvs.h"
+#include "hn_rndis.h"
+#include "ndis.h"
+
+#define HN_RNDIS_XFER_SIZE 0x4000
+
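+/*
+ * Composite capability masks: an offload is advertised only when the
+ * host reports every bit in the corresponding mask (see
+ * hn_rndis_get_offload() below).
+ */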
+#define HN_NDIS_TXCSUM_CAP_IP4 \
+ (NDIS_TXCSUM_CAP_IP4 | NDIS_TXCSUM_CAP_IP4OPT)
+#define HN_NDIS_TXCSUM_CAP_TCP4 \
+ (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT)
+#define HN_NDIS_TXCSUM_CAP_TCP6 \
+ (NDIS_TXCSUM_CAP_TCP6 | NDIS_TXCSUM_CAP_TCP6OPT | \
+ NDIS_TXCSUM_CAP_IP6EXT)
+#define HN_NDIS_TXCSUM_CAP_UDP6 \
+ (NDIS_TXCSUM_CAP_UDP6 | NDIS_TXCSUM_CAP_IP6EXT)
+#define HN_NDIS_LSOV2_CAP_IP6 \
+ (NDIS_LSOV2_CAP_IP6EXT | NDIS_LSOV2_CAP_TCP6OPT)
+
+/* Get unique request id */
+static inline uint32_t
+hn_rndis_rid(struct hn_data *hv)
+{
+ uint32_t rid;
+
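+ /*
+ * Id 0 is reserved: it marks "no request pending" in
+ * hv->rndis_pending, so skip it on wraparound.
+ */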
+ do {
+ rid = rte_atomic32_add_return(&hv->rndis_req_id, 1);
+ } while (rid == 0);
+
+ return rid;
+}
+
+static void *hn_rndis_alloc(struct hn_data *hv, size_t size)
+{
+ return rte_zmalloc_socket("RNDIS", size, PAGE_SIZE,
+ hv->vmbus->device.numa_node);
+}
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP
+void hn_rndis_dump(const void *buf)
+{
+ const union {
+ struct rndis_msghdr hdr;
+ struct rndis_packet_msg pkt;
+ struct rndis_init_req init_request;
+ struct rndis_init_comp init_complete;
+ struct rndis_halt_req halt;
+ struct rndis_query_req query_request;
+ struct rndis_query_comp query_complete;
+ struct rndis_set_req set_request;
+ struct rndis_set_comp set_complete;
+ struct rndis_reset_req reset_request;
+ struct rndis_reset_comp reset_complete;
+ struct rndis_keepalive_req keepalive_request;
+ struct rndis_keepalive_comp keepalive_complete;
+ struct rndis_status_msg indicate_status;
+ } *rndis_msg = buf;
+
+ switch (rndis_msg->hdr.type) {
+ case RNDIS_PACKET_MSG: {
+ const struct rndis_pktinfo *ppi;
+ unsigned int ppi_len;
+
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_PACKET (len %u, data %u:%u, # oob %u %u:%u, pkt %u:%u)\n",
+ rndis_msg->pkt.len,
+ rndis_msg->pkt.dataoffset,
+ rndis_msg->pkt.datalen,
+ rndis_msg->pkt.oobdataelements,
+ rndis_msg->pkt.oobdataoffset,
+ rndis_msg->pkt.oobdatalen,
+ rndis_msg->pkt.pktinfooffset,
+ rndis_msg->pkt.pktinfolen);
+
+ ppi = (const struct rndis_pktinfo *)
+ ((const char *)buf
+ + RNDIS_PACKET_MSG_OFFSET_ABS(rndis_msg->pkt.pktinfooffset));
+
+ ppi_len = rndis_msg->pkt.pktinfolen;
+ while (ppi_len > 0) {
+ const void *ppi_data;
+
+ ppi_data = ppi->data;
+
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ " PPI (size %u, type %u, offs %u data %#x)\n",
+ ppi->size, ppi->type, ppi->offset,
+ *(const uint32_t *)ppi_data);
+ if (ppi->size == 0)
+ break;
+ ppi_len -= ppi->size;
+ ppi = (const struct rndis_pktinfo *)
+ ((const char *)ppi + ppi->size);
+ }
+ break;
+ }
+ case RNDIS_INITIALIZE_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_INIT (len %u id %#x, ver %u.%u max xfer %u)\n",
+ rndis_msg->init_request.len,
+ rndis_msg->init_request.rid,
+ rndis_msg->init_request.ver_major,
+ rndis_msg->init_request.ver_minor,
+ rndis_msg->init_request.max_xfersz);
+ break;
+
+ case RNDIS_INITIALIZE_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_INIT_C (len %u, id %#x, status 0x%x, vers %u.%u, "
+ "flags %d, max xfer %u, max pkts %u, aligned %u)\n",
+ rndis_msg->init_complete.len,
+ rndis_msg->init_complete.rid,
+ rndis_msg->init_complete.status,
+ rndis_msg->init_complete.ver_major,
+ rndis_msg->init_complete.ver_minor,
+ rndis_msg->init_complete.devflags,
+ rndis_msg->init_complete.pktmaxsz,
+ rndis_msg->init_complete.pktmaxcnt,
+ rndis_msg->init_complete.align);
+ break;
+
+ case RNDIS_HALT_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_HALT (len %u id %#x)\n",
+ rndis_msg->halt.len, rndis_msg->halt.rid);
+ break;
+
+ case RNDIS_QUERY_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_QUERY (len %u, id %#x, oid %#x, info %u:%u)\n",
+ rndis_msg->query_request.len,
+ rndis_msg->query_request.rid,
+ rndis_msg->query_request.oid,
+ rndis_msg->query_request.infobuflen,
+ rndis_msg->query_request.infobufoffset);
+ break;
+
+ case RNDIS_QUERY_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_QUERY_C (len %u, id %#x, status 0x%x, buf %u:%u)\n",
+ rndis_msg->query_complete.len,
+ rndis_msg->query_complete.rid,
+ rndis_msg->query_complete.status,
+ rndis_msg->query_complete.infobuflen,
+ rndis_msg->query_complete.infobufoffset);
+ break;
+
+ case RNDIS_SET_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_SET (len %u, id %#x, oid %#x, info %u:%u)\n",
+ rndis_msg->set_request.len,
+ rndis_msg->set_request.rid,
+ rndis_msg->set_request.oid,
+ rndis_msg->set_request.infobuflen,
+ rndis_msg->set_request.infobufoffset);
+ break;
+
+ case RNDIS_SET_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
+ rndis_msg->set_complete.len,
+ rndis_msg->set_complete.rid,
+ rndis_msg->set_complete.status);
+ break;
+
+ case RNDIS_INDICATE_STATUS_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_INDICATE (len %u, status %#x, buf len %u, buf offset %u)\n",
+ rndis_msg->indicate_status.len,
+ rndis_msg->indicate_status.status,
+ rndis_msg->indicate_status.stbuflen,
+ rndis_msg->indicate_status.stbufoffset);
+ break;
+
+ case RNDIS_RESET_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_RESET (len %u, id %#x)\n",
+ rndis_msg->reset_request.len,
+ rndis_msg->reset_request.rid);
+ break;
+
+ case RNDIS_RESET_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_RESET_C (len %u, status %#x address %#x)\n",
+ rndis_msg->reset_complete.len,
+ rndis_msg->reset_complete.status,
+ rndis_msg->reset_complete.adrreset);
+ break;
+
+ case RNDIS_KEEPALIVE_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_KEEPALIVE (len %u, id %#x)\n",
+ rndis_msg->keepalive_request.len,
+ rndis_msg->keepalive_request.rid);
+ break;
+
+ case RNDIS_KEEPALIVE_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_KEEPALIVE_C (len %u, id %#x address %#x)\n",
+ rndis_msg->keepalive_complete.len,
+ rndis_msg->keepalive_complete.rid,
+ rndis_msg->keepalive_complete.status);
+ break;
+
+ default:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS type %#x len %u\n",
+ rndis_msg->hdr.type,
+ rndis_msg->hdr.len);
+ break;
+ }
+}
+#endif
+
+static int hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan,
+ const void *req, uint32_t reqlen)
+
+{
+ struct hn_nvs_rndis nvs_rndis = {
+ .type = NVS_TYPE_RNDIS,
+ .rndis_mtype = NVS_RNDIS_MTYPE_CTRL,
+ .chim_idx = NVS_CHIM_IDX_INVALID,
+ .chim_sz = 0
+ };
+ struct vmbus_gpa sg;
+ rte_iova_t addr;
+
+ addr = rte_malloc_virt2iova(req);
+ if (unlikely(addr == RTE_BAD_IOVA)) {
+ PMD_DRV_LOG(ERR, "RNDIS send request can not get iova");
+ return -EINVAL;
+ }
+
+ if (unlikely(reqlen > PAGE_SIZE)) {
+ PMD_DRV_LOG(ERR, "RNDIS request %u greater than page size",
+ reqlen);
+ return -EINVAL;
+ }
+
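+ /*
+ * Split the iova into page number and in-page offset (PAGE_MASK is
+ * assumed to be PAGE_SIZE - 1 here). For example, with 4 KiB pages
+ * an iova of 0x12345678 gives page 0x12345 and ofs 0x678.
+ */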
+ sg.page = addr / PAGE_SIZE;
+ sg.ofs = addr & PAGE_MASK;
+ sg.len = reqlen;
+
+ if (sg.ofs + reqlen > PAGE_SIZE) {
+ PMD_DRV_LOG(ERR, "RNDIS request crosses page bounary");
+ return -EINVAL;
+ }
+
+ hn_rndis_dump(req);
+
+ return hn_nvs_send_sglist(chan, &sg, 1,
+ &nvs_rndis, sizeof(nvs_rndis), 0U, NULL);
+}
+
+void hn_rndis_link_status(struct hn_data *hv __rte_unused, const void *msg)
+{
+ const struct rndis_status_msg *indicate = msg;
+
+ hn_rndis_dump(msg);
+
+ PMD_DRV_LOG(DEBUG, "link status %#x", indicate->status);
+
+ switch (indicate->status) {
+ case RNDIS_STATUS_LINK_SPEED_CHANGE:
+ case RNDIS_STATUS_NETWORK_CHANGE:
+ case RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG:
+ /* ignored; not exposed via the DPDK API */
+ break;
+
+ case RNDIS_STATUS_MEDIA_CONNECT:
+ case RNDIS_STATUS_MEDIA_DISCONNECT:
+ /* TODO handle as LSC interrupt */
+ break;
+ default:
+ PMD_DRV_LOG(NOTICE, "unknown RNDIS indication: %#x",
+ indicate->status);
+ }
+}
+
+/* Callback from hn_process_events when response is visible */
+void hn_rndis_receive_response(struct hn_data *hv,
+ const void *data, uint32_t len)
+{
+ const struct rndis_init_comp *hdr = data;
+
+ hn_rndis_dump(data);
+
+ if (len < sizeof(3 * sizeof(uint32_t))) {
+ PMD_DRV_LOG(ERR,
+ "missing RNDIS header %u", len);
+ return;
+ }
+
+ if (len < hdr->len) {
+ PMD_DRV_LOG(ERR,
+ "truncated RNDIS response %u", len);
+ return;
+ }
+
+ if (len > sizeof(hv->rndis_resp)) {
+ PMD_DRV_LOG(NOTICE,
+ "RNDIS response exceeds buffer");
+ len = sizeof(hv->rndis_resp);
+ }
+
+ if (hdr->rid == 0) {
+ PMD_DRV_LOG(NOTICE,
+ "RNDIS response id zero!");
+ }
+
+ memcpy(hv->rndis_resp, data, len);
+
+ /* make sure response copied before update */
+ rte_smp_wmb();
+
+ if (rte_atomic32_cmpset(&hv->rndis_pending, hdr->rid, 0) == 0) {
+ PMD_DRV_LOG(ERR,
+ "received id %#x pending id %#x",
+ hdr->rid, (uint32_t)hv->rndis_pending);
+ }
+}
+
+/* Do request/response transaction */
+static int hn_rndis_exec1(struct hn_data *hv,
+ const void *req, uint32_t reqlen,
+ void *comp, uint32_t comp_len)
+{
+ const struct rndis_halt_req *hdr = req;
+ uint32_t rid = hdr->rid;
+ struct vmbus_channel *chan = hn_primary_chan(hv);
+ int error;
+
+ if (comp_len > sizeof(hv->rndis_resp)) {
+ PMD_DRV_LOG(ERR,
+ "Expected completion size %u exceeds buffer %zu",
+ comp_len, sizeof(hv->rndis_resp));
+ return -EIO;
+ }
+
+ if (comp != NULL &&
+ rte_atomic32_cmpset(&hv->rndis_pending, 0, rid) == 0) {
+ PMD_DRV_LOG(ERR,
+ "Request already pending");
+ return -EBUSY;
+ }
+
+ error = hn_nvs_send_rndis_ctrl(chan, req, reqlen);
+ if (error) {
+ PMD_DRV_LOG(ERR, "RNDIS ctrl send failed: %d", error);
+ return error;
+ }
+
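+ /*
+ * hn_rndis_receive_response() copies the completion into
+ * hv->rndis_resp and then clears rndis_pending; spin until it does.
+ */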
+ if (comp) {
+ /* Poll primary channel until response received */
+ while (hv->rndis_pending == rid)
+ hn_process_events(hv, 0);
+
+ memcpy(comp, hv->rndis_resp, comp_len);
+ }
+
+ return 0;
+}
+
+/* Do transaction and validate response */
+static int hn_rndis_execute(struct hn_data *hv, uint32_t rid,
+ const void *req, uint32_t reqlen,
+ void *comp, uint32_t comp_len, uint32_t comp_type)
+{
+ const struct rndis_comp_hdr *hdr = comp;
+ int ret;
+
+ memset(comp, 0, comp_len);
+
+ ret = hn_rndis_exec1(hv, req, reqlen, comp, comp_len);
+ if (ret < 0)
+ return ret;
+ /*
+ * Check this RNDIS complete message.
+ */
+ if (unlikely(hdr->type != comp_type)) {
+ PMD_DRV_LOG(ERR,
+ "unexpected RNDIS response complete %#x expect %#x",
+ hdr->type, comp_type);
+
+ return -ENXIO;
+ }
+ if (unlikely(hdr->rid != rid)) {
+ PMD_DRV_LOG(ERR,
+ "RNDIS comp rid mismatch %#x, expect %#x",
+ hdr->rid, rid);
+ return -EINVAL;
+ }
+
+ /* All pass! */
+ return 0;
+}
+
+static int
+hn_rndis_query(struct hn_data *hv, uint32_t oid,
+ const void *idata, uint32_t idlen,
+ void *odata, uint32_t odlen)
+{
+ struct rndis_query_req *req;
+ struct rndis_query_comp *comp;
+ uint32_t reqlen, comp_len;
+ int error = -EIO;
+ unsigned int ofs;
+ uint32_t rid;
+
+ reqlen = sizeof(*req) + idlen;
+ req = hn_rndis_alloc(hv, reqlen);
+ if (req == NULL)
+ return -ENOMEM;
+
+ comp_len = sizeof(*comp) + odlen;
+ comp = rte_zmalloc("QUERY", comp_len, PAGE_SIZE);
+ if (!comp) {
+ error = -ENOMEM;
+ goto done;
+ }
+ comp->status = RNDIS_STATUS_PENDING;
+
+ rid = hn_rndis_rid(hv);
+
+ req->type = RNDIS_QUERY_MSG;
+ req->len = reqlen;
+ req->rid = rid;
+ req->oid = oid;
+ req->infobufoffset = RNDIS_QUERY_REQ_INFOBUFOFFSET;
+ req->infobuflen = idlen;
+
+ /* Input data immediately follows RNDIS query. */
+ memcpy(req + 1, idata, idlen);
+
+ error = hn_rndis_execute(hv, rid, req, reqlen,
+ comp, comp_len, RNDIS_QUERY_CMPLT);
+
+ if (error)
+ goto done;
+
+ if (comp->status != RNDIS_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR, "RNDIS query 0x%08x failed: status 0x%08x",
+ oid, comp->status);
+ error = -EINVAL;
+ goto done;
+ }
+
+ if (comp->infobuflen == 0 || comp->infobufoffset == 0) {
+ /* No output data! */
+ PMD_DRV_LOG(ERR, "RNDIS query 0x%08x, no data", oid);
+ error = 0;
+ goto done;
+ }
+
+ /*
+ * Check output data length and offset.
+ */
+ /* ofs is the offset from the beginning of comp. */
+ ofs = RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(comp->infobufoffset);
+ if (ofs < sizeof(*comp) || ofs + comp->infobuflen > comp_len) {
+ PMD_DRV_LOG(ERR, "RNDIS query invalid comp ib off/len, %u/%u",
+ comp->infobufoffset, comp->infobuflen);
+ error = -EINVAL;
+ goto done;
+ }
+
+ /* Save output data. */
+ if (comp->infobuflen < odlen)
+ odlen = comp->infobuflen;
+
+ /* ofs is the offset from the beginning of comp. */
+ memcpy(odata, (const char *)comp + ofs, odlen);
+
+ error = 0;
+done:
+ rte_free(comp);
+ rte_free(req);
+ return error;
+}
+
+static int
+hn_rndis_halt(struct hn_data *hv)
+{
+ struct rndis_halt_req *halt;
+
+ halt = hn_rndis_alloc(hv, sizeof(*halt));
+ if (halt == NULL)
+ return -ENOMEM;
+
+ halt->type = RNDIS_HALT_MSG;
+ halt->len = sizeof(*halt);
+ halt->rid = hn_rndis_rid(hv);
+
+ /* No RNDIS completion; rely on NVS message send completion */
+ hn_rndis_exec1(hv, halt, sizeof(*halt), NULL, 0);
+
+ rte_free(halt);
+
+ PMD_INIT_LOG(DEBUG, "RNDIS halt done");
+ return 0;
+}
+
+static int
+hn_rndis_query_hwcaps(struct hn_data *hv, struct ndis_offload *caps)
+{
+ struct ndis_offload in;
+ uint32_t caps_len, size;
+ int error;
+
+ memset(caps, 0, sizeof(*caps));
+ memset(&in, 0, sizeof(in));
+ in.ndis_hdr.ndis_type = NDIS_OBJTYPE_OFFLOAD;
+
+ if (hv->ndis_ver >= NDIS_VERSION_6_30) {
+ in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_3;
+ size = NDIS_OFFLOAD_SIZE;
+ } else if (hv->ndis_ver >= NDIS_VERSION_6_1) {
+ in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_2;
+ size = NDIS_OFFLOAD_SIZE_6_1;
+ } else {
+ in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_1;
+ size = NDIS_OFFLOAD_SIZE_6_0;
+ }
+ in.ndis_hdr.ndis_size = size;
+
+ caps_len = NDIS_OFFLOAD_SIZE;
+ error = hn_rndis_query(hv, OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
+ &in, size, caps, caps_len);
+ if (error)
+ return error;
+
+ /* Preliminary verification. */
+ if (caps->ndis_hdr.ndis_type != NDIS_OBJTYPE_OFFLOAD) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objtype 0x%02x",
+ caps->ndis_hdr.ndis_type);
+ return -EINVAL;
+ }
+ if (caps->ndis_hdr.ndis_rev < NDIS_OFFLOAD_REV_1) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objrev 0x%02x",
+ caps->ndis_hdr.ndis_rev);
+ return -EINVAL;
+ }
+ if (caps->ndis_hdr.ndis_size > caps_len) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objsize %u, data size %u",
+ caps->ndis_hdr.ndis_size, caps_len);
+ return -EINVAL;
+ } else if (caps->ndis_hdr.ndis_size < NDIS_OFFLOAD_SIZE_6_0) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objsize %u",
+ caps->ndis_hdr.ndis_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+hn_rndis_query_rsscaps(struct hn_data *hv,
+ unsigned int *rxr_cnt0)
+{
+ struct ndis_rss_caps in, caps;
+ unsigned int indsz, rxr_cnt;
+ uint32_t caps_len;
+ int error;
+
+ *rxr_cnt0 = 0;
+
+ if (hv->ndis_ver < NDIS_VERSION_6_20) {
+ PMD_DRV_LOG(DEBUG, "RSS not supported on this host");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&in, 0, sizeof(in));
+ in.ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_CAPS;
+ in.ndis_hdr.ndis_rev = NDIS_RSS_CAPS_REV_2;
+ in.ndis_hdr.ndis_size = NDIS_RSS_CAPS_SIZE;
+
+ caps_len = NDIS_RSS_CAPS_SIZE;
+ error = hn_rndis_query(hv, OID_GEN_RECEIVE_SCALE_CAPABILITIES,
+ &in, NDIS_RSS_CAPS_SIZE,
+ &caps, caps_len);
+ if (error)
+ return error;
+
+ PMD_INIT_LOG(DEBUG, "RX rings %u indirect %u caps %#x",
+ caps.ndis_nrxr, caps.ndis_nind, caps.ndis_caps);
+ /*
+ * Preliminary verification.
+ */
+ if (caps.ndis_hdr.ndis_type != NDIS_OBJTYPE_RSS_CAPS) {
+ PMD_DRV_LOG(ERR, "invalid NDIS objtype 0x%02x",
+ caps.ndis_hdr.ndis_type);
+ return -EINVAL;
+ }
+ if (caps.ndis_hdr.ndis_rev < NDIS_RSS_CAPS_REV_1) {
+ PMD_DRV_LOG(ERR, "invalid NDIS objrev 0x%02x",
+ caps.ndis_hdr.ndis_rev);
+ return -EINVAL;
+ }
+ if (caps.ndis_hdr.ndis_size > caps_len) {
+ PMD_DRV_LOG(ERR,
+ "invalid NDIS objsize %u, data size %u",
+ caps.ndis_hdr.ndis_size, caps_len);
+ return -EINVAL;
+ } else if (caps.ndis_hdr.ndis_size < NDIS_RSS_CAPS_SIZE_6_0) {
+ PMD_DRV_LOG(ERR, "invalid NDIS objsize %u",
+ caps.ndis_hdr.ndis_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Save information for later RSS configuration.
+ */
+ if (caps.ndis_nrxr == 0) {
+ PMD_DRV_LOG(ERR, "0 RX rings!?");
+ return -EINVAL;
+ }
+ rxr_cnt = caps.ndis_nrxr;
+
+ if (caps.ndis_hdr.ndis_size == NDIS_RSS_CAPS_SIZE &&
+ caps.ndis_hdr.ndis_rev >= NDIS_RSS_CAPS_REV_2) {
+ if (caps.ndis_nind > NDIS_HASH_INDCNT) {
+ PMD_DRV_LOG(ERR,
+ "too many RSS indirect table entries %u",
+ caps.ndis_nind);
+ return -EOPNOTSUPP;
+ }
+ if (!rte_is_power_of_2(caps.ndis_nind)) {
+ PMD_DRV_LOG(ERR,
+ "RSS indirect table size is not power-of-2 %u",
+ caps.ndis_nind);
+ }
+
+ indsz = caps.ndis_nind;
+ } else {
+ indsz = NDIS_HASH_INDCNT;
+ }
+
+ if (indsz < rxr_cnt) {
+ PMD_DRV_LOG(NOTICE,
+ "# of RX rings (%d) > RSS indirect table size %d",
+ rxr_cnt, indsz);
+ rxr_cnt = indsz;
+ }
+
+ hv->rss_offloads = 0;
+ if (caps.ndis_caps & NDIS_RSS_CAP_IPV4)
+ hv->rss_offloads |= ETH_RSS_IPV4
+ | ETH_RSS_NONFRAG_IPV4_TCP
+ | ETH_RSS_NONFRAG_IPV4_UDP;
+ if (caps.ndis_caps & NDIS_RSS_CAP_IPV6)
+ hv->rss_offloads |= ETH_RSS_IPV6
+ | ETH_RSS_NONFRAG_IPV6_TCP;
+ if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX)
+ hv->rss_offloads |= ETH_RSS_IPV6_EX
+ | ETH_RSS_IPV6_TCP_EX;
+
+ /* Commit! */
+ *rxr_cnt0 = rxr_cnt;
+
+ return 0;
+}
+
+static int
+hn_rndis_set(struct hn_data *hv, uint32_t oid, const void *data, uint32_t dlen)
+{
+ struct rndis_set_req *req;
+ struct rndis_set_comp comp;
+ uint32_t reqlen, comp_len;
+ uint32_t rid;
+ int error;
+
+ reqlen = sizeof(*req) + dlen;
+ req = rte_zmalloc("RNDIS_SET", reqlen, PAGE_SIZE);
+ if (!req)
+ return -ENOMEM;
+
+ rid = hn_rndis_rid(hv);
+ req->type = RNDIS_SET_MSG;
+ req->len = reqlen;
+ req->rid = rid;
+ req->oid = oid;
+ req->infobuflen = dlen;
+ req->infobufoffset = RNDIS_SET_REQ_INFOBUFOFFSET;
+
+ /* Data immediately follows RNDIS set. */
+ memcpy(req + 1, data, dlen);
+
+ comp_len = sizeof(comp);
+ error = hn_rndis_execute(hv, rid, req, reqlen,
+ &comp, comp_len,
+ RNDIS_SET_CMPLT);
+ if (error) {
+ PMD_DRV_LOG(ERR, "exec RNDIS set %#" PRIx32 " failed",
+ oid);
+ error = -EIO;
+ goto done;
+ }
+
+ if (comp.status != RNDIS_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "RNDIS set %#" PRIx32 " failed: status %#" PRIx32,
+ oid, comp.status);
+ error = -EIO;
+ goto done;
+ }
+
+done:
+ rte_free(req);
+ return error;
+}
+
+int hn_rndis_conf_offload(struct hn_data *hv,
+ uint64_t tx_offloads, uint64_t rx_offloads)
+{
+ struct ndis_offload_params params;
+ struct ndis_offload hwcaps;
+ int error;
+
+ error = hn_rndis_query_hwcaps(hv, &hwcaps);
+ if (error) {
+ PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error);
+ return error;
+ }
+
+ /* NOTE: 0 means "no change" */
+ memset(&params, 0, sizeof(params));
+
+ params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
+ if (hv->ndis_ver < NDIS_VERSION_6_30) {
+ params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
+ params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
+ } else {
+ params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
+ params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
+ params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+
+ if (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_TCP6)
+ params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) {
+ if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4)
+ == NDIS_RXCSUM_CAP_TCP4)
+ params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+
+ if ((hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6)
+ == NDIS_RXCSUM_CAP_TCP6)
+ params.ndis_tcp6csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
+ params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+
+ if ((hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6)
+ == NDIS_TXCSUM_CAP_UDP6)
+ params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+ }
+
+ if (rx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4)
+ params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+
+ if (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6)
+ params.ndis_udp6csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4)
+ == NDIS_TXCSUM_CAP_IP4)
+ params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+ }
+ if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
+ params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023)
+ params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON;
+ else
+ goto unsupported;
+
+ if ((hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
+ == HN_NDIS_LSOV2_CAP_IP6)
+ params.ndis_lsov2_ip6 = NDIS_OFFLOAD_LSOV2_ON;
+ else
+ goto unsupported;
+ }
+
+ error = hn_rndis_set(hv, OID_TCP_OFFLOAD_PARAMETERS, &params,
+ params.ndis_hdr.ndis_size);
+ if (error) {
+ PMD_DRV_LOG(ERR, "offload config failed");
+ return error;
+ }
+
+ return 0;
+ unsupported:
+ PMD_DRV_LOG(NOTICE,
+ "offload tx:%" PRIx64 " rx:%" PRIx64 " not supported by this version",
+ tx_offloads, rx_offloads);
+ return -EINVAL;
+}
+
+int hn_rndis_get_offload(struct hn_data *hv,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ndis_offload hwcaps;
+ int error;
+
+ memset(&hwcaps, 0, sizeof(hwcaps));
+
+ error = hn_rndis_query_hwcaps(hv, &hwcaps);
+ if (error) {
+ PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error);
+ return error;
+ }
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4)
+ == HN_NDIS_TXCSUM_CAP_IP4)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4)
+ == HN_NDIS_TXCSUM_CAP_TCP4 &&
+ (hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6)
+ == HN_NDIS_TXCSUM_CAP_TCP6)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) &&
+ (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6))
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM;
+
+ if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) &&
+ (hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
+ == HN_NDIS_LSOV2_CAP_IP6)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_CRC_STRIP;
+
+ if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) &&
+ (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) &&
+ (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM;
+
+ return 0;
+}
+
+int
+hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter)
+{
+ int error;
+
+ error = hn_rndis_set(hv, OID_GEN_CURRENT_PACKET_FILTER,
+ &filter, sizeof(filter));
+ if (error) {
+ PMD_DRV_LOG(ERR, "set RX filter %#" PRIx32 " failed: %d",
+ filter, error);
+ } else {
+ PMD_DRV_LOG(DEBUG, "set RX filter %#" PRIx32 " done", filter);
+ }
+
+ return error;
+}
+
+/* The default RSS key.
+ * This value is the same as MLX5's so that flows will be
+ * received on the same path for both the VF and the synthetic NIC.
+ */
+static const uint8_t rss_default_key[NDIS_HASH_KEYSIZE_TOEPLITZ] = {
+ 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
+ 0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
+ 0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
+ 0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
+ 0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a,
+};
+
+int hn_rndis_conf_rss(struct hn_data *hv,
+ const struct rte_eth_rss_conf *rss_conf)
+{
+ struct ndis_rssprm_toeplitz rssp;
+ struct ndis_rss_params *prm = &rssp.rss_params;
+ const uint8_t *rss_key = rss_conf->rss_key ? : rss_default_key;
+ uint32_t rss_hash;
+ unsigned int i;
+ int error;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(&rssp, 0, sizeof(rssp));
+
+ prm->ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_PARAMS;
+ prm->ndis_hdr.ndis_rev = NDIS_RSS_PARAMS_REV_2;
+ prm->ndis_hdr.ndis_size = sizeof(*prm);
+ prm->ndis_flags = 0;
+
+ rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
+ if (rss_conf->rss_hf & ETH_RSS_IPV4)
+ rss_hash |= NDIS_HASH_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ rss_hash |= NDIS_HASH_TCP_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_IPV6)
+ rss_hash |= NDIS_HASH_IPV6;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ rss_hash |= NDIS_HASH_TCP_IPV6;
+
+ prm->ndis_hash = rss_hash;
+ prm->ndis_indsize = sizeof(rssp.rss_ind[0]) * NDIS_HASH_INDCNT;
+ prm->ndis_indoffset = offsetof(struct ndis_rssprm_toeplitz, rss_ind[0]);
+ prm->ndis_keysize = NDIS_HASH_KEYSIZE_TOEPLITZ;
+ prm->ndis_keyoffset = offsetof(struct ndis_rssprm_toeplitz, rss_key[0]);
+
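+ /*
+ * Fill the indirection table round-robin across the configured
+ * queues; e.g. with 4 queues the entries are 0,1,2,3,0,1,...
+ */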
+ for (i = 0; i < NDIS_HASH_INDCNT; i++)
+ rssp.rss_ind[i] = i % hv->num_queues;
+
+ /* Set hash key */
+ memcpy(&rssp.rss_key, rss_key, NDIS_HASH_KEYSIZE_TOEPLITZ);
+
+ error = hn_rndis_set(hv, OID_GEN_RECEIVE_SCALE_PARAMETERS,
+ &rssp, sizeof(rssp));
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "RSS config num queues=%u failed: %d",
+ hv->num_queues, error);
+ }
+ return error;
+}
+
+static int hn_rndis_init(struct hn_data *hv)
+{
+ struct rndis_init_req *req;
+ struct rndis_init_comp comp;
+ uint32_t comp_len, rid;
+ int error;
+
+ req = hn_rndis_alloc(hv, sizeof(*req));
+ if (!req) {
+ PMD_DRV_LOG(ERR, "no memory for RNDIS init");
+ return -ENOMEM;
+ }
+
+ rid = hn_rndis_rid(hv);
+ req->type = RNDIS_INITIALIZE_MSG;
+ req->len = sizeof(*req);
+ req->rid = rid;
+ req->ver_major = RNDIS_VERSION_MAJOR;
+ req->ver_minor = RNDIS_VERSION_MINOR;
+ req->max_xfersz = HN_RNDIS_XFER_SIZE;
+
+ comp_len = RNDIS_INIT_COMP_SIZE_MIN;
+ error = hn_rndis_execute(hv, rid, req, sizeof(*req),
+ &comp, comp_len,
+ RNDIS_INITIALIZE_CMPLT);
+ if (error)
+ goto done;
+
+ if (comp.status != RNDIS_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR, "RNDIS init failed: status 0x%08x",
+ comp.status);
+ error = -EIO;
+ goto done;
+ }
+
+ hv->rndis_agg_size = comp.pktmaxsz;
+ hv->rndis_agg_pkts = comp.pktmaxcnt;
+ hv->rndis_agg_align = 1U << comp.align;
+
+ if (hv->rndis_agg_align < sizeof(uint32_t)) {
+ /*
+ * The RNDIS packet message encap assumes that the RNDIS
+ * packet message is at least 4 bytes aligned. Fix up the
+ * alignment here, if the remote side sets the alignment
+ * too low.
+ */
+ PMD_DRV_LOG(NOTICE,
+ "fixup RNDIS aggpkt align: %u -> %zu",
+ hv->rndis_agg_align, sizeof(uint32_t));
+ hv->rndis_agg_align = sizeof(uint32_t);
+ }
+
+ PMD_INIT_LOG(INFO,
+ "RNDIS ver %u.%u, aggpkt size %u, aggpkt cnt %u, aggpkt align %u",
+ comp.ver_major, comp.ver_minor,
+ hv->rndis_agg_size, hv->rndis_agg_pkts,
+ hv->rndis_agg_align);
+ error = 0;
+done:
+ rte_free(req);
+ return error;
+}
+
+int
+hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr)
+{
+ uint32_t eaddr_len;
+ int error;
+
+ eaddr_len = ETHER_ADDR_LEN;
+ error = hn_rndis_query(hv, OID_802_3_PERMANENT_ADDRESS, NULL, 0,
+ eaddr, eaddr_len);
+ if (error)
+ return error;
+
+ PMD_DRV_LOG(INFO, "MAC address %02x:%02x:%02x:%02x:%02x:%02x",
+ eaddr[0], eaddr[1], eaddr[2],
+ eaddr[3], eaddr[4], eaddr[5]);
+ return 0;
+}
+
+int
+hn_rndis_get_linkstatus(struct hn_data *hv)
+{
+ return hn_rndis_query(hv, OID_GEN_MEDIA_CONNECT_STATUS, NULL, 0,
+ &hv->link_status, sizeof(uint32_t));
+}
+
+int
+hn_rndis_get_linkspeed(struct hn_data *hv)
+{
+ return hn_rndis_query(hv, OID_GEN_LINK_SPEED, NULL, 0,
+ &hv->link_speed, sizeof(uint32_t));
+}
+
+int
+hn_rndis_attach(struct hn_data *hv)
+{
+ /* Initialize RNDIS. */
+ return hn_rndis_init(hv);
+}
+
+void
+hn_rndis_detach(struct hn_data *hv)
+{
+ /* Halt the RNDIS. */
+ hn_rndis_halt(hv);
+}
diff --git a/drivers/net/netvsc/hn_rndis.h b/drivers/net/netvsc/hn_rndis.h
new file mode 100644
index 00000000..89e2e6ba
--- /dev/null
+++ b/drivers/net/netvsc/hn_rndis.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#include "rndis.h"
+
+struct hn_data;
+
+void hn_rndis_receive_response(struct hn_data *hv,
+ const void *data, uint32_t len);
+void hn_rndis_link_status(struct hn_data *hv, const void *data);
+int hn_rndis_attach(struct hn_data *hv);
+void hn_rndis_detach(struct hn_data *hv);
+int hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr);
+int hn_rndis_get_linkstatus(struct hn_data *hv);
+int hn_rndis_get_linkspeed(struct hn_data *hv);
+int hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter);
+void hn_rndis_rx_ctrl(struct hn_data *hv, const void *data,
+ int dlen);
+int hn_rndis_get_offload(struct hn_data *hv,
+ struct rte_eth_dev_info *dev_info);
+int hn_rndis_conf_offload(struct hn_data *hv,
+ uint64_t tx_offloads,
+ uint64_t rx_offloads);
+int hn_rndis_query_rsscaps(struct hn_data *hv,
+ unsigned int *rxr_cnt0);
+int hn_rndis_conf_rss(struct hn_data *hv,
+ const struct rte_eth_rss_conf *rss_conf);
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP
+void hn_rndis_dump(const void *buf);
+#else
+#define hn_rndis_dump(buf)
+#endif
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
new file mode 100644
index 00000000..02ef27e3
--- /dev/null
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -0,0 +1,1334 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Microsoft Corporation
+ * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <strings.h>
+
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_net.h>
+#include <rte_bus_vmbus.h>
+#include <rte_spinlock.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_rndis.h"
+#include "hn_nvs.h"
+#include "ndis.h"
+
+#define HN_NVS_SEND_MSG_SIZE \
+ (sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))
+
+#define HN_TXD_CACHE_SIZE 32 /* per cpu tx_descriptor pool cache */
+#define HN_TXCOPY_THRESHOLD 512
+
+#define HN_RXCOPY_THRESHOLD 256
+#define HN_RXQ_EVENT_DEFAULT 2048
+
+struct hn_rxinfo {
+ uint32_t vlan_info;
+ uint32_t csum_info;
+ uint32_t hash_info;
+ uint32_t hash_value;
+};
+
+#define HN_RXINFO_VLAN 0x0001
+#define HN_RXINFO_CSUM 0x0002
+#define HN_RXINFO_HASHINF 0x0004
+#define HN_RXINFO_HASHVAL 0x0008
+#define HN_RXINFO_ALL \
+ (HN_RXINFO_VLAN | \
+ HN_RXINFO_CSUM | \
+ HN_RXINFO_HASHINF | \
+ HN_RXINFO_HASHVAL)
+
+#define HN_NDIS_VLAN_INFO_INVALID 0xffffffff
+#define HN_NDIS_RXCSUM_INFO_INVALID 0
+#define HN_NDIS_HASH_INFO_INVALID 0
+
+/*
+ * Per-transmit bookkeeping.
+ * A slot in the transmit ring (chim_index) is reserved for each transmit.
+ *
+ * There are two types of transmit:
+ *   - buffered transmit where the chimney buffer is used and the RNDIS
+ *     header is placed in the buffer; mbuf == NULL for this case.
+ *
+ *   - direct transmit where the RNDIS header is in rndis_pkt;
+ *     the mbuf is freed after transmit.
+ *
+ * Descriptors come from a per-port pool which is used
+ * to limit the number of outstanding requests per device.
+ */
+struct hn_txdesc {
+ struct rte_mbuf *m;
+
+ uint16_t queue_id;
+ uint16_t chim_index;
+ uint32_t chim_size;
+ uint32_t data_size;
+ uint32_t packets;
+
+ struct rndis_packet_msg *rndis_pkt;
+};
+
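+/*
+ * Worst-case RNDIS header: the packet message itself plus the four
+ * per-packet-info blocks (hash value, VLAN, LSOv2 and TX checksum)
+ * that the transmit path may append.
+ */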
+#define HN_RNDIS_PKT_LEN \
+ (sizeof(struct rndis_packet_msg) + \
+ RNDIS_PKTINFO_SIZE(NDIS_HASH_VALUE_SIZE) + \
+ RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) + \
+ RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) + \
+ RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE))
+
+/* Minimum space required for a packet */
+#define HN_PKTSIZE_MIN(align) \
+ RTE_ALIGN(ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)
+
+#define DEFAULT_TX_FREE_THRESH 32U
+
+static void
+hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
+{
+ uint32_t s = m->pkt_len;
+ const struct ether_addr *ea;
+
+ if (s == 64) {
+ stats->size_bins[1]++;
+ } else if (s > 64 && s < 1024) {
+ uint32_t bin;
+
+ /* count zeros, and offset into correct bin */
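+ /*
+ * bin = floor(log2(s)) - 4 here, so 65..127 -> bin 2,
+ * 128..255 -> 3, 256..511 -> 4 and 512..1023 -> 5
+ * (bin 1 is exactly 64 bytes).
+ */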
+ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+ stats->size_bins[bin]++;
+ } else {
+ if (s < 64)
+ stats->size_bins[0]++;
+ else if (s < 1519)
+ stats->size_bins[6]++;
+ else if (s >= 1519)
+ stats->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(m, const struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ stats->broadcast++;
+ else
+ stats->multicast++;
+ }
+}
+
+static inline unsigned int hn_rndis_pktlen(const struct rndis_packet_msg *pkt)
+{
+ return pkt->pktinfooffset + pkt->pktinfolen;
+}
+
+static inline uint32_t
+hn_rndis_pktmsg_offset(uint32_t ofs)
+{
+ return ofs - offsetof(struct rndis_packet_msg, dataoffset);
+}
+
+static void hn_txd_init(struct rte_mempool *mp __rte_unused,
+ void *opaque, void *obj, unsigned int idx)
+{
+ struct hn_txdesc *txd = obj;
+ struct rte_eth_dev *dev = opaque;
+ struct rndis_packet_msg *pkt;
+
+ memset(txd, 0, sizeof(*txd));
+ txd->chim_index = idx;
+
+ pkt = rte_malloc_socket("RNDIS_TX", HN_RNDIS_PKT_LEN,
+ rte_align32pow2(HN_RNDIS_PKT_LEN),
+ dev->device->numa_node);
+ if (!pkt)
+ rte_exit(EXIT_FAILURE, "can not allocate RNDIS header");
+
+ txd->rndis_pkt = pkt;
+}
+
+/*
+ * Unlike Linux and FreeBSD, this driver uses a mempool
+ * to limit outstanding transmits and reserve buffers
+ */
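+/*
+ * One descriptor exists per chimney slot: hn_txd_init() assigns
+ * chim_index from the mempool object index, so pool exhaustion
+ * naturally throttles the number of in-flight transmits.
+ */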
+int
+hn_tx_pool_init(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ char name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(name, sizeof(name),
+ "hn_txd_%u", dev->data->port_id);
+
+ PMD_INIT_LOG(DEBUG, "create a TX send pool %s n=%u size=%zu socket=%d",
+ name, hv->chim_cnt, sizeof(struct hn_txdesc),
+ dev->device->numa_node);
+
+ mp = rte_mempool_create(name, hv->chim_cnt, sizeof(struct hn_txdesc),
+ HN_TXD_CACHE_SIZE, 0,
+ NULL, NULL,
+ hn_txd_init, dev,
+ dev->device->numa_node, 0);
+ if (!mp) {
+ PMD_DRV_LOG(ERR,
+ "mempool %s create failed: %d", name, rte_errno);
+ return -rte_errno;
+ }
+
+ hv->tx_pool = mp;
+ return 0;
+}
+
+static void hn_reset_txagg(struct hn_tx_queue *txq)
+{
+ txq->agg_szleft = txq->agg_szmax;
+ txq->agg_pktleft = txq->agg_pktmax;
+ txq->agg_txd = NULL;
+ txq->agg_prevpkt = NULL;
+}
+
+int
+hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct hn_tx_queue *txq;
+ uint32_t tx_free_thresh;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq)
+ return -ENOMEM;
+
+ txq->hv = hv;
+ txq->chan = hv->channels[queue_idx];
+ txq->port_id = dev->data->port_id;
+ txq->queue_id = queue_idx;
+
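+ /*
+ * Default the free threshold to min(chim_cnt / 4, 32) and clamp
+ * it to at most chim_cnt - 3 so cleanup runs before the chimney
+ * slots run out.
+ */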
+ tx_free_thresh = tx_conf->tx_free_thresh;
+ if (tx_free_thresh == 0)
+ tx_free_thresh = RTE_MIN(hv->chim_cnt / 4,
+ DEFAULT_TX_FREE_THRESH);
+
+ if (tx_free_thresh >= hv->chim_cnt - 3)
+ tx_free_thresh = hv->chim_cnt - 3;
+
+ txq->free_thresh = tx_free_thresh;
+
+ txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size);
+ txq->agg_pktmax = hv->rndis_agg_pkts;
+ txq->agg_align = hv->rndis_agg_align;
+
+ hn_reset_txagg(txq);
+
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+}
+
+void
+hn_dev_tx_queue_release(void *arg)
+{
+ struct hn_tx_queue *txq = arg;
+ struct hn_txdesc *txd;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!txq)
+ return;
+
+ /* If any pending data is still present just drop it */
+ txd = txq->agg_txd;
+ if (txd)
+ rte_mempool_put(txq->hv->tx_pool, txd);
+
+ rte_free(txq);
+}
+
+void
+hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct hn_tx_queue *txq = dev->data->rx_queues[queue_idx];
+
+ qinfo->conf.tx_free_thresh = txq->free_thresh;
+ qinfo->nb_desc = hv->tx_pool->size;
+}
+
+static void
+hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
+ unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
+{
+ struct hn_txdesc *txd = (struct hn_txdesc *)xactid;
+ struct hn_tx_queue *txq;
+
+ /* Control packets are sent with xactid == 0 */
+ if (!txd)
+ return;
+
+ txq = dev->data->tx_queues[queue_id];
+ if (likely(ack->status == NVS_STATUS_OK)) {
+ PMD_TX_LOG(DEBUG, "port %u:%u complete tx %u packets %u bytes %u",
+ txq->port_id, txq->queue_id, txd->chim_index,
+ txd->packets, txd->data_size);
+ txq->stats.bytes += txd->data_size;
+ txq->stats.packets += txd->packets;
+ } else {
+ PMD_TX_LOG(NOTICE, "port %u:%u complete tx %u failed status %u",
+ txq->port_id, txq->queue_id, txd->chim_index, ack->status);
+ ++txq->stats.errors;
+ }
+
+ rte_pktmbuf_free(txd->m);
+
+ rte_mempool_put(txq->hv->tx_pool, txd);
+}
+
+/* Handle transmit completion events */
+static void
+hn_nvs_handle_comp(struct rte_eth_dev *dev, uint16_t queue_id,
+ const struct vmbus_chanpkt_hdr *pkt,
+ const void *data)
+{
+ const struct hn_nvs_hdr *hdr = data;
+
+ switch (hdr->type) {
+ case NVS_TYPE_RNDIS_ACK:
+ hn_nvs_send_completed(dev, queue_id, pkt->xactid, data);
+ break;
+
+ default:
+ PMD_TX_LOG(NOTICE,
+ "unexpected send completion type %u",
+ hdr->type);
+ }
+}
+
+/* Parse per-packet info (meta data) */
+static int
+hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen,
+ struct hn_rxinfo *info)
+{
+ const struct rndis_pktinfo *pi = info_data;
+ uint32_t mask = 0;
+
+ while (info_dlen != 0) {
+ const void *data;
+ uint32_t dlen;
+
+ if (unlikely(info_dlen < sizeof(*pi)))
+ return -EINVAL;
+
+ if (unlikely(info_dlen < pi->size))
+ return -EINVAL;
+ info_dlen -= pi->size;
+
+ if (unlikely(pi->size & RNDIS_PKTINFO_SIZE_ALIGNMASK))
+ return -EINVAL;
+ if (unlikely(pi->size < pi->offset))
+ return -EINVAL;
+
+ dlen = pi->size - pi->offset;
+ data = pi->data;
+
+ switch (pi->type) {
+ case NDIS_PKTINFO_TYPE_VLAN:
+ if (unlikely(dlen < NDIS_VLAN_INFO_SIZE))
+ return -EINVAL;
+ info->vlan_info = *((const uint32_t *)data);
+ mask |= HN_RXINFO_VLAN;
+ break;
+
+ case NDIS_PKTINFO_TYPE_CSUM:
+ if (unlikely(dlen < NDIS_RXCSUM_INFO_SIZE))
+ return -EINVAL;
+ info->csum_info = *((const uint32_t *)data);
+ mask |= HN_RXINFO_CSUM;
+ break;
+
+ case NDIS_PKTINFO_TYPE_HASHVAL:
+ if (unlikely(dlen < NDIS_HASH_VALUE_SIZE))
+ return -EINVAL;
+ info->hash_value = *((const uint32_t *)data);
+ mask |= HN_RXINFO_HASHVAL;
+ break;
+
+ case NDIS_PKTINFO_TYPE_HASHINF:
+ if (unlikely(dlen < NDIS_HASH_INFO_SIZE))
+ return -EINVAL;
+ info->hash_info = *((const uint32_t *)data);
+ mask |= HN_RXINFO_HASHINF;
+ break;
+
+ default:
+ goto next;
+ }
+
+ if (mask == HN_RXINFO_ALL)
+ break; /* All found; done */
+next:
+ pi = (const struct rndis_pktinfo *)
+ ((const uint8_t *)pi + pi->size);
+ }
+
+ /*
+ * Final fixup.
+ * - If there is no hash value, invalidate the hash info.
+ */
+ if (!(mask & HN_RXINFO_HASHVAL))
+ info->hash_info = HN_NDIS_HASH_INFO_INVALID;
+ return 0;
+}
+
+/*
+ * Ack the consumed RXBUF associated w/ this channel packet,
+ * so that this RXBUF can be recycled by the hypervisor.
+ */
+static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb)
+{
+ struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo;
+ struct hn_data *hv = rxb->hv;
+
+ if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) {
+ hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
+ --hv->rxbuf_outstanding;
+ }
+}
+
+static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
+{
+ hn_rx_buf_release(opaque);
+}
+
+static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
+ const struct vmbus_chanpkt_rxbuf *pkt)
+{
+ struct hn_rx_bufinfo *rxb;
+
+ rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;
+ rxb->chan = rxq->chan;
+ rxb->xactid = pkt->hdr.xactid;
+ rxb->hv = rxq->hv;
+
+ rxb->shinfo.free_cb = hn_rx_buf_free_cb;
+ rxb->shinfo.fcb_opaque = rxb;
+ rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1);
+ return rxb;
+}
+
+static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
+ uint8_t *data, unsigned int headroom, unsigned int dlen,
+ const struct hn_rxinfo *info)
+{
+ struct hn_data *hv = rxq->hv;
+ struct rte_mbuf *m;
+
+ m = rte_pktmbuf_alloc(rxq->mb_pool);
+ if (unlikely(!m)) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxq->port_id];
+
+ dev->data->rx_mbuf_alloc_failed++;
+ return;
+ }
+
+ /*
+ * For large packets, avoid the copy when possible, but keep some
+ * space available in the receive area for later packets.
+ */
+ if (dlen >= HN_RXCOPY_THRESHOLD &&
+ hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) {
+ struct rte_mbuf_ext_shared_info *shinfo;
+ const void *rxbuf;
+ rte_iova_t iova;
+
+ /*
+ * Build an external mbuf that points to the receive area.
+ * Use refcount to handle multiple packets in same
+ * receive buffer section.
+ */
+ rxbuf = hv->rxbuf_res->addr;
+ iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
+ shinfo = &rxb->shinfo;
+
+ if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1)
+ ++hv->rxbuf_outstanding;
+
+ rte_pktmbuf_attach_extbuf(m, data, iova,
+ dlen + headroom, shinfo);
+ m->data_off = headroom;
+ } else {
+ /* Mbufs in the pool must be large enough to hold small packets */
+ if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) {
+ rte_pktmbuf_free_seg(m);
+ ++rxq->stats.errors;
+ return;
+ }
+ rte_memcpy(rte_pktmbuf_mtod(m, void *),
+ data + headroom, dlen);
+ }
+
+ m->port = rxq->port_id;
+ m->pkt_len = dlen;
+ m->data_len = dlen;
+ m->packet_type = rte_net_get_ptype(m, NULL,
+ RTE_PTYPE_L2_MASK |
+ RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+
+ if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
+ m->vlan_tci = info->vlan_info;
+ m->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+ }
+
+ if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
+ if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK)
+ m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK
+ | NDIS_RXCSUM_INFO_TCPCS_OK))
+ m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED
+ | NDIS_RXCSUM_INFO_UDPCS_FAILED))
+ m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+
+ if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
+ m->ol_flags |= PKT_RX_RSS_HASH;
+ m->hash.rss = info->hash_value;
+ }
+
+ PMD_RX_LOG(DEBUG,
+ "port %u:%u RX id %"PRIu64" size %u type %#x ol_flags %#"PRIx64,
+ rxq->port_id, rxq->queue_id, rxb->xactid,
+ m->pkt_len, m->packet_type, m->ol_flags);
+
+ ++rxq->stats.packets;
+ rxq->stats.bytes += m->pkt_len;
+ hn_update_packet_stats(&rxq->stats, m);
+
+ if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
+ ++rxq->ring_full;
+ rte_pktmbuf_free(m);
+ }
+}
+
+static void hn_rndis_rx_data(struct hn_rx_queue *rxq,
+ struct hn_rx_bufinfo *rxb,
+ void *data, uint32_t dlen)
+{
+ unsigned int data_off, data_len, pktinfo_off, pktinfo_len;
+ const struct rndis_packet_msg *pkt = data;
+ struct hn_rxinfo info = {
+ .vlan_info = HN_NDIS_VLAN_INFO_INVALID,
+ .csum_info = HN_NDIS_RXCSUM_INFO_INVALID,
+ .hash_info = HN_NDIS_HASH_INFO_INVALID,
+ };
+ int err;
+
+ hn_rndis_dump(pkt);
+
+ if (unlikely(dlen < sizeof(*pkt)))
+ goto error;
+
+ if (unlikely(dlen < pkt->len))
+ goto error; /* truncated RNDIS from host */
+
+ if (unlikely(pkt->len < pkt->datalen
+ + pkt->oobdatalen + pkt->pktinfolen))
+ goto error;
+
+ if (unlikely(pkt->datalen == 0))
+ goto error;
+
+ /* Check offsets. */
+ if (unlikely(pkt->dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN))
+ goto error;
+
+ if (likely(pkt->pktinfooffset > 0) &&
+ unlikely(pkt->pktinfooffset < RNDIS_PACKET_MSG_OFFSET_MIN ||
+ (pkt->pktinfooffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK)))
+ goto error;
+
+ data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
+ data_len = pkt->datalen;
+ pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset);
+ pktinfo_len = pkt->pktinfolen;
+
+ if (likely(pktinfo_len > 0)) {
+ err = hn_rndis_rxinfo((const uint8_t *)pkt + pktinfo_off,
+ pktinfo_len, &info);
+ if (err)
+ goto error;
+ }
+
+ if (unlikely(data_off + data_len > pkt->len))
+ goto error;
+
+ if (unlikely(data_len < ETHER_HDR_LEN))
+ goto error;
+
+ hn_rxpkt(rxq, rxb, data, data_off, data_len, &info);
+ return;
+error:
+ ++rxq->stats.errors;
+}
+
+static void
+hn_rndis_receive(const struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
+ struct hn_rx_bufinfo *rxb, void *buf, uint32_t len)
+{
+ const struct rndis_msghdr *hdr = buf;
+
+ switch (hdr->type) {
+ case RNDIS_PACKET_MSG:
+ if (dev->data->dev_started)
+ hn_rndis_rx_data(rxq, rxb, buf, len);
+ break;
+
+ case RNDIS_INDICATE_STATUS_MSG:
+ hn_rndis_link_status(rxq->hv, buf);
+ break;
+
+ case RNDIS_INITIALIZE_CMPLT:
+ case RNDIS_QUERY_CMPLT:
+ case RNDIS_SET_CMPLT:
+ hn_rndis_receive_response(rxq->hv, buf, len);
+ break;
+
+ default:
+ PMD_DRV_LOG(NOTICE,
+ "unexpected RNDIS message (type %#x len %u)",
+ hdr->type, len);
+ break;
+ }
+}
+
+static void
+hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
+ struct hn_data *hv,
+ struct hn_rx_queue *rxq,
+ const struct vmbus_chanpkt_hdr *hdr,
+ const void *buf)
+{
+ const struct vmbus_chanpkt_rxbuf *pkt;
+ const struct hn_nvs_hdr *nvs_hdr = buf;
+ uint32_t rxbuf_sz = hv->rxbuf_res->len;
+ char *rxbuf = hv->rxbuf_res->addr;
+ unsigned int i, hlen, count;
+ struct hn_rx_bufinfo *rxb;
+
+ /* At minimum we need type header */
+ if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*nvs_hdr))) {
+ PMD_RX_LOG(ERR, "invalid receive nvs RNDIS");
+ return;
+ }
+
+ /* Make sure that this is an RNDIS message. */
+ if (unlikely(nvs_hdr->type != NVS_TYPE_RNDIS)) {
+ PMD_RX_LOG(ERR, "nvs type %u, not RNDIS",
+ nvs_hdr->type);
+ return;
+ }
+
+ hlen = vmbus_chanpkt_getlen(hdr->hlen);
+ if (unlikely(hlen < sizeof(*pkt))) {
+ PMD_RX_LOG(ERR, "invalid rxbuf chanpkt");
+ return;
+ }
+
+ pkt = container_of(hdr, const struct vmbus_chanpkt_rxbuf, hdr);
+ if (unlikely(pkt->rxbuf_id != NVS_RXBUF_SIG)) {
+ PMD_RX_LOG(ERR, "invalid rxbuf_id 0x%08x",
+ pkt->rxbuf_id);
+ return;
+ }
+
+ count = pkt->rxbuf_cnt;
+ if (unlikely(hlen < offsetof(struct vmbus_chanpkt_rxbuf,
+ rxbuf[count]))) {
+ PMD_RX_LOG(ERR, "invalid rxbuf_cnt %u", count);
+ return;
+ }
+
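+ /*
+ * The transaction id doubles as the index into the per-section
+ * rxbuf_info array (see hn_rx_buf_init), so validate it first.
+ */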
+ if (pkt->hdr.xactid > hv->rxbuf_section_cnt) {
+ PMD_RX_LOG(ERR, "invalid rxbuf section id %" PRIx64,
+ pkt->hdr.xactid);
+ return;
+ }
+
+ /* Setup receive buffer info to allow for callback */
+ rxb = hn_rx_buf_init(rxq, pkt);
+
+ /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
+ for (i = 0; i < count; ++i) {
+ unsigned int ofs, len;
+
+ ofs = pkt->rxbuf[i].ofs;
+ len = pkt->rxbuf[i].len;
+
+ if (unlikely(ofs + len > rxbuf_sz)) {
+ PMD_RX_LOG(ERR,
+ "%uth RNDIS msg overflow ofs %u, len %u",
+ i, ofs, len);
+ continue;
+ }
+
+ if (unlikely(len == 0)) {
+ PMD_RX_LOG(ERR, "%uth RNDIS msg len %u", i, len);
+ continue;
+ }
+
+ hn_rndis_receive(dev, rxq, rxb,
+ rxbuf + ofs, len);
+ }
+
+ /* Send ACK now if external mbuf not used */
+ hn_rx_buf_release(rxb);
+}
+
+struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
+ uint16_t queue_id,
+ unsigned int socket_id)
+{
+ struct hn_rx_queue *rxq;
+
+ rxq = rte_zmalloc_socket("HN_RXQ",
+ sizeof(*rxq) + HN_RXQ_EVENT_DEFAULT,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq) {
+ rxq->hv = hv;
+ rxq->chan = hv->channels[queue_id];
+ rte_spinlock_init(&rxq->ring_lock);
+ rxq->port_id = hv->port_id;
+ rxq->queue_id = queue_id;
+ }
+ return rxq;
+}
+
+int
+hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ char ring_name[RTE_RING_NAMESIZE];
+ struct hn_rx_queue *rxq;
+ unsigned int count;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue_idx == 0) {
+ rxq = hv->primary;
+ } else {
+ rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id);
+ if (!rxq)
+ return -ENOMEM;
+ }
+
+ rxq->mb_pool = mp;
+ count = rte_mempool_avail_count(mp) / dev->data->nb_rx_queues;
+ if (nb_desc == 0 || nb_desc > count)
+ nb_desc = count;
+
+	/*
+	 * Staging ring carries mbufs from the receive event logic to
+	 * rx_pkts. rx_pkts assumes the caller handles any multi-threading;
+	 * the event logic has its own locking.
+	 */
+ snprintf(ring_name, sizeof(ring_name),
+ "hn_rx_%u_%u", dev->data->port_id, queue_idx);
+ rxq->rx_ring = rte_ring_create(ring_name,
+ rte_align32pow2(nb_desc),
+ socket_id, 0);
+ if (!rxq->rx_ring)
+ goto fail;
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ return 0;
+
+fail:
+ rte_ring_free(rxq->rx_ring);
+ rte_free(rxq->event_buf);
+ rte_free(rxq);
+ return -ENOMEM;
+}
+
+void
+hn_dev_rx_queue_release(void *arg)
+{
+ struct hn_rx_queue *rxq = arg;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!rxq)
+ return;
+
+ rte_ring_free(rxq->rx_ring);
+ rxq->rx_ring = NULL;
+ rxq->mb_pool = NULL;
+
+ if (rxq != rxq->hv->primary) {
+ rte_free(rxq->event_buf);
+ rte_free(rxq);
+ }
+}
+
+void
+hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct hn_rx_queue *rxq = dev->data->rx_queues[queue_idx];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = 1;
+ qinfo->nb_desc = rte_ring_get_capacity(rxq->rx_ring);
+}
+
+static void
+hn_nvs_handle_notify(const struct vmbus_chanpkt_hdr *pkthdr,
+ const void *data)
+{
+ const struct hn_nvs_hdr *hdr = data;
+
+ if (unlikely(vmbus_chanpkt_datalen(pkthdr) < sizeof(*hdr))) {
+ PMD_DRV_LOG(ERR, "invalid nvs notify");
+ return;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "got notify, nvs type %u", hdr->type);
+}
+
+/*
+ * Process pending events on the channel.
+ * Called from both the Rx queue poll and Tx cleanup paths.
+ */
+void hn_process_events(struct hn_data *hv, uint16_t queue_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
+ struct hn_rx_queue *rxq;
+ uint32_t bytes_read = 0;
+ int ret = 0;
+
+ rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id];
+
+ /* If no pending data then nothing to do */
+ if (rte_vmbus_chan_rx_empty(rxq->chan))
+ return;
+
+	/*
+	 * The channel is shared between the Rx and Tx queues, so a lock is
+	 * needed because DPDK does not force the same CPU to be used for
+	 * Rx and Tx.
+	 */
+ if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock)))
+ return;
+
+ for (;;) {
+ const struct vmbus_chanpkt_hdr *pkt;
+ uint32_t len = HN_RXQ_EVENT_DEFAULT;
+ const void *data;
+
+ ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len);
+ if (ret == -EAGAIN)
+ break; /* ring is empty */
+
+ else if (ret == -ENOBUFS)
+ rte_exit(EXIT_FAILURE, "event buffer not big enough (%u < %u)",
+ HN_RXQ_EVENT_DEFAULT, len);
+ else if (ret <= 0)
+ rte_exit(EXIT_FAILURE,
+ "vmbus ring buffer error: %d", ret);
+
+ bytes_read += ret;
+ pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf;
+ data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen);
+
+ switch (pkt->type) {
+ case VMBUS_CHANPKT_TYPE_COMP:
+ hn_nvs_handle_comp(dev, queue_id, pkt, data);
+ break;
+
+ case VMBUS_CHANPKT_TYPE_RXBUF:
+ hn_nvs_handle_rxbuf(dev, hv, rxq, pkt, data);
+ break;
+
+ case VMBUS_CHANPKT_TYPE_INBAND:
+ hn_nvs_handle_notify(pkt, data);
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type);
+ break;
+ }
+
+ if (rxq->rx_ring && rte_ring_full(rxq->rx_ring))
+ break;
+ }
+
+ if (bytes_read > 0)
+ rte_vmbus_chan_signal_read(rxq->chan, bytes_read);
+
+ rte_spinlock_unlock(&rxq->ring_lock);
+}
+
+static void hn_append_to_chim(struct hn_tx_queue *txq,
+ struct rndis_packet_msg *pkt,
+ const struct rte_mbuf *m)
+{
+ struct hn_txdesc *txd = txq->agg_txd;
+ uint8_t *buf = (uint8_t *)pkt;
+ unsigned int data_offs;
+
+ hn_rndis_dump(pkt);
+
+ data_offs = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
+ txd->chim_size += pkt->len;
+ txd->data_size += m->pkt_len;
+ ++txd->packets;
+ hn_update_packet_stats(&txq->stats, m);
+
+ for (; m; m = m->next) {
+ uint16_t len = rte_pktmbuf_data_len(m);
+
+ rte_memcpy(buf + data_offs,
+ rte_pktmbuf_mtod(m, const char *), len);
+ data_offs += len;
+ }
+}
+
+/*
+ * Send pending aggregated data in chimney buffer (if any).
+ * Returns error if send was unsuccessful because channel ring buffer
+ * was full.
+ */
+static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig)
+
+{
+ struct hn_txdesc *txd = txq->agg_txd;
+ struct hn_nvs_rndis rndis;
+ int ret;
+
+ if (!txd)
+ return 0;
+
+ rndis = (struct hn_nvs_rndis) {
+ .type = NVS_TYPE_RNDIS,
+ .rndis_mtype = NVS_RNDIS_MTYPE_DATA,
+ .chim_idx = txd->chim_index,
+ .chim_sz = txd->chim_size,
+ };
+
+ PMD_TX_LOG(DEBUG, "port %u:%u tx %u size %u",
+ txq->port_id, txq->queue_id, txd->chim_index, txd->chim_size);
+
+ ret = hn_nvs_send(txq->chan, VMBUS_CHANPKT_FLAG_RC,
+ &rndis, sizeof(rndis), (uintptr_t)txd, need_sig);
+
+ if (likely(ret == 0))
+ hn_reset_txagg(txq);
+ else
+ PMD_TX_LOG(NOTICE, "port %u:%u send failed: %d",
+ txq->port_id, txq->queue_id, ret);
+
+ return ret;
+}
+
+static struct hn_txdesc *hn_new_txd(struct hn_data *hv,
+ struct hn_tx_queue *txq)
+{
+ struct hn_txdesc *txd;
+
+ if (rte_mempool_get(hv->tx_pool, (void **)&txd)) {
+ ++txq->stats.nomemory;
+ PMD_TX_LOG(DEBUG, "tx pool exhausted!");
+ return NULL;
+ }
+
+ txd->m = NULL;
+ txd->queue_id = txq->queue_id;
+ txd->packets = 0;
+ txd->data_size = 0;
+ txd->chim_size = 0;
+
+ return txd;
+}
+
+static void *
+hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, uint32_t pktsize)
+{
+ struct hn_txdesc *agg_txd = txq->agg_txd;
+ struct rndis_packet_msg *pkt;
+ void *chim;
+
+ if (agg_txd) {
+ unsigned int padding, olen;
+
+		/*
+		 * Update the previous RNDIS packet's total length, which
+		 * can grow due to the mandatory alignment padding for this
+		 * RNDIS packet, and update the aggregating txdesc's chimney
+		 * sending buffer size accordingly (see the worked example
+		 * after this function).
+		 *
+		 * Zero out the padding, as required by the RNDIS spec.
+		 */
+ pkt = txq->agg_prevpkt;
+ olen = pkt->len;
+ padding = RTE_ALIGN(olen, txq->agg_align) - olen;
+ if (padding > 0) {
+ agg_txd->chim_size += padding;
+ pkt->len += padding;
+ memset((uint8_t *)pkt + olen, 0, padding);
+ }
+
+ chim = (uint8_t *)pkt + pkt->len;
+
+ txq->agg_pktleft--;
+ txq->agg_szleft -= pktsize;
+ if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) {
+ /*
+ * Probably can't aggregate more packets,
+ * flush this aggregating txdesc proactively.
+ */
+ txq->agg_pktleft = 0;
+ }
+ } else {
+ agg_txd = hn_new_txd(hv, txq);
+ if (!agg_txd)
+ return NULL;
+
+ chim = (uint8_t *)hv->chim_res->addr
+ + agg_txd->chim_index * hv->chim_szmax;
+
+ txq->agg_txd = agg_txd;
+ txq->agg_pktleft = txq->agg_pktmax - 1;
+ txq->agg_szleft = txq->agg_szmax - pktsize;
+ }
+ txq->agg_prevpkt = chim;
+
+ return chim;
+}
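The padding arithmetic above is the subtle part of aggregation, so here is a minimal sketch of it in isolation; the 90-byte length and 8-byte alignment are made-up example values, not taken from this driver:

	/* Hypothetical illustration of the hn_try_txagg() padding math. */
	unsigned int olen = 90;		/* previous RNDIS pkt->len */
	unsigned int agg_align = 8;	/* example txq->agg_align */
	unsigned int padding = RTE_ALIGN(olen, agg_align) - olen;

	/* padding == 6: pkt->len grows to 96, chim_size grows by 6,
	 * and the next RNDIS packet starts at (uint8_t *)pkt + 96.
	 */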
+
+static inline void *
+hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt,
+ uint32_t pi_dlen, uint32_t pi_type)
+{
+ const uint32_t pi_size = RNDIS_PKTINFO_SIZE(pi_dlen);
+ struct rndis_pktinfo *pi;
+
+ /*
+ * Per-packet-info does not move; it only grows.
+ *
+ * NOTE:
+ * pktinfooffset in this phase counts from the beginning
+ * of rndis_packet_msg.
+ */
+ pi = (struct rndis_pktinfo *)((uint8_t *)pkt + hn_rndis_pktlen(pkt));
+
+ pkt->pktinfolen += pi_size;
+
+ pi->size = pi_size;
+ pi->type = pi_type;
+ pi->offset = RNDIS_PKTINFO_OFFSET;
+
+ return pi->data;
+}
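For a sense of the record layout, this is how the hash-value append used by hn_encap() works out; the sizes follow directly from struct rndis_pktinfo, and the numbers are illustrative:

	/* Sketch: appending a 4-byte hash-value per-packet-info. */
	uint32_t *hash = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE,
						 NDIS_PKTINFO_TYPE_HASHVAL);
	*hash = queue_id;
	/* pi->size   == RNDIS_PKTINFO_SIZE(4) == 12 + 4 == 16
	 * pi->offset == RNDIS_PKTINFO_OFFSET  == 12
	 * and pkt->pktinfolen grew by 16.
	 */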
+
+/* Put RNDIS header and packet info on packet */
+static void hn_encap(struct rndis_packet_msg *pkt,
+ uint16_t queue_id,
+ const struct rte_mbuf *m)
+{
+ unsigned int hlen = m->l2_len + m->l3_len;
+ uint32_t *pi_data;
+ uint32_t pkt_hlen;
+
+ pkt->type = RNDIS_PACKET_MSG;
+ pkt->len = m->pkt_len;
+ pkt->dataoffset = 0;
+ pkt->datalen = m->pkt_len;
+ pkt->oobdataoffset = 0;
+ pkt->oobdatalen = 0;
+ pkt->oobdataelements = 0;
+ pkt->pktinfooffset = sizeof(*pkt);
+ pkt->pktinfolen = 0;
+ pkt->vchandle = 0;
+ pkt->reserved = 0;
+
+	/*
+	 * Set the hash value for this packet to the queue_id, so that the
+	 * TX done event for this packet arrives on the right channel.
+	 */
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE,
+ NDIS_PKTINFO_TYPE_HASHVAL);
+ *pi_data = queue_id;
+
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
+ NDIS_PKTINFO_TYPE_VLAN);
+ *pi_data = m->vlan_tci;
+ }
+
+ if (m->ol_flags & PKT_TX_TCP_SEG) {
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE,
+ NDIS_PKTINFO_TYPE_LSO);
+
+ if (m->ol_flags & PKT_TX_IPV6) {
+ *pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen,
+ m->tso_segsz);
+ } else {
+ *pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen,
+ m->tso_segsz);
+ }
+ } else if (m->ol_flags &
+ (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)) {
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE,
+ NDIS_PKTINFO_TYPE_CSUM);
+ *pi_data = 0;
+
+ if (m->ol_flags & PKT_TX_IPV6)
+ *pi_data |= NDIS_TXCSUM_INFO_IPV6;
+ if (m->ol_flags & PKT_TX_IPV4) {
+ *pi_data |= NDIS_TXCSUM_INFO_IPV4;
+
+ if (m->ol_flags & PKT_TX_IP_CKSUM)
+ *pi_data |= NDIS_TXCSUM_INFO_IPCS;
+ }
+
+ if (m->ol_flags & PKT_TX_TCP_CKSUM)
+ *pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen);
+ else if (m->ol_flags & PKT_TX_UDP_CKSUM)
+ *pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen);
+ }
+
+ pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen;
+ /* Fixup RNDIS packet message total length */
+ pkt->len += pkt_hlen;
+
+ /* Convert RNDIS packet message offsets */
+ pkt->dataoffset = hn_rndis_pktmsg_offset(pkt_hlen);
+ pkt->pktinfooffset = hn_rndis_pktmsg_offset(pkt->pktinfooffset);
+}
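To make the final fixup concrete, here is the arithmetic for a packet that carries only the mandatory hash per-packet-info; hn_rndis_pktmsg_offset() is defined earlier in the file and is assumed here to subtract offsetof(struct rndis_packet_msg, dataoffset), the inverse of RNDIS_PACKET_MSG_OFFSET_ABS():

	/* Worked example for hn_encap() (illustrative values):
	 *   sizeof(struct rndis_packet_msg)  == 44  (11 x uint32_t)
	 *   hash pktinfo                     == 16  (always appended)
	 *   pkt_hlen      = 44 + 16          == 60
	 *   pkt->len      = 60 + m->pkt_len
	 *   dataoffset    = 60 - 8           == 52  (data follows pktinfo)
	 *   pktinfooffset = 44 - 8           == 36
	 */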
+
+/* How many scatter-gather list elements are needed */
+static unsigned int hn_get_slots(const struct rte_mbuf *m)
+{
+ unsigned int slots = 1; /* for RNDIS header */
+
+ while (m) {
+ unsigned int size = rte_pktmbuf_data_len(m);
+ unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;
+
+ slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
+ m = m->next;
+ }
+
+ return slots;
+}
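Each vmbus_gpa element describes a run of bytes within a single page (hn_fill_sg() below splits at page boundaries), so the slot count rounds every segment up to whole pages; for example:

	/* Page-span arithmetic from hn_get_slots() (example values):
	 * a 3000-byte segment whose IOVA starts 3000 bytes into a
	 * 4 KiB page touches two pages:
	 *
	 *   offs = 3000, size = 3000
	 *   (3000 + 3000 + 4096 - 1) / 4096 == 2
	 *
	 * A segment contained entirely in one page yields 1.
	 */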
+
+/* Build scatter gather list from chained mbuf */
+static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
+ const struct rte_mbuf *m)
+{
+ unsigned int segs = 0;
+
+ while (m) {
+ rte_iova_t addr = rte_mbuf_data_iova(m);
+ unsigned int page = addr / PAGE_SIZE;
+ unsigned int offset = addr & PAGE_MASK;
+ unsigned int len = rte_pktmbuf_data_len(m);
+
+ while (len > 0) {
+ unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);
+
+ sg[segs].page = page;
+ sg[segs].ofs = offset;
+ sg[segs].len = bytes;
+ segs++;
+
+ ++page;
+ offset = 0;
+ len -= bytes;
+ }
+ m = m->next;
+ }
+
+ return segs;
+}
+
+/* Transmit directly from mbuf */
+static int hn_xmit_sg(struct hn_tx_queue *txq,
+ const struct hn_txdesc *txd, const struct rte_mbuf *m,
+ bool *need_sig)
+{
+ struct vmbus_gpa sg[hn_get_slots(m)];
+ struct hn_nvs_rndis nvs_rndis = {
+ .type = NVS_TYPE_RNDIS,
+ .rndis_mtype = NVS_RNDIS_MTYPE_DATA,
+ .chim_sz = txd->chim_size,
+ };
+ rte_iova_t addr;
+ unsigned int segs;
+
+ /* attach aggregation data if present */
+ if (txd->chim_size > 0)
+ nvs_rndis.chim_idx = txd->chim_index;
+ else
+ nvs_rndis.chim_idx = NVS_CHIM_IDX_INVALID;
+
+ hn_rndis_dump(txd->rndis_pkt);
+
+ /* pass IOVA of rndis header in first segment */
+ addr = rte_malloc_virt2iova(txd->rndis_pkt);
+ if (unlikely(addr == RTE_BAD_IOVA)) {
+ PMD_DRV_LOG(ERR, "RNDIS transmit can not get iova");
+ return -EINVAL;
+ }
+
+ sg[0].page = addr / PAGE_SIZE;
+ sg[0].ofs = addr & PAGE_MASK;
+ sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
+ segs = 1;
+
+ hn_update_packet_stats(&txq->stats, m);
+
+ segs += hn_fill_sg(sg + 1, m);
+
+ PMD_TX_LOG(DEBUG, "port %u:%u tx %u segs %u size %u",
+ txq->port_id, txq->queue_id, txd->chim_index,
+ segs, nvs_rndis.chim_sz);
+
+ return hn_nvs_send_sglist(txq->chan, sg, segs,
+ &nvs_rndis, sizeof(nvs_rndis),
+ (uintptr_t)txd, need_sig);
+}
+
+uint16_t
+hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct hn_tx_queue *txq = ptxq;
+ struct hn_data *hv = txq->hv;
+ bool need_sig = false;
+ uint16_t nb_tx;
+ int ret;
+
+ if (unlikely(hv->closed))
+ return 0;
+
+ if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh)
+ hn_process_events(hv, txq->queue_id);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *m = tx_pkts[nb_tx];
+ uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN;
+ struct rndis_packet_msg *pkt;
+
+		/* Aggregate small packets in the chimney buffer */
+ if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) {
+ /* If this packet will not fit, then flush */
+ if (txq->agg_pktleft == 0 ||
+ RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
+ if (hn_flush_txagg(txq, &need_sig))
+ goto fail;
+ }
+
+ pkt = hn_try_txagg(hv, txq, pkt_size);
+ if (unlikely(!pkt))
+ break;
+
+ hn_encap(pkt, txq->queue_id, m);
+ hn_append_to_chim(txq, pkt, m);
+
+ rte_pktmbuf_free(m);
+
+ /* if buffer is full, flush */
+ if (txq->agg_pktleft == 0 &&
+ hn_flush_txagg(txq, &need_sig))
+ goto fail;
+ } else {
+ struct hn_txdesc *txd;
+
+			/* Chimney data and a large packet can be sent in one message */
+ txd = txq->agg_txd;
+ if (txd) {
+ hn_reset_txagg(txq);
+ } else {
+ txd = hn_new_txd(hv, txq);
+ if (unlikely(!txd))
+ break;
+ }
+
+ pkt = txd->rndis_pkt;
+ txd->m = m;
+ txd->data_size += m->pkt_len;
+ ++txd->packets;
+
+ hn_encap(pkt, txq->queue_id, m);
+
+ ret = hn_xmit_sg(txq, txd, m, &need_sig);
+ if (unlikely(ret != 0)) {
+ PMD_TX_LOG(NOTICE, "sg send failed: %d", ret);
+ ++txq->stats.errors;
+ rte_mempool_put(hv->tx_pool, txd);
+ goto fail;
+ }
+ }
+ }
+
+	/* If a partial buffer is left, try to send it;
+	 * if that fails, reuse it on the next send.
+	 */
+ hn_flush_txagg(txq, &need_sig);
+
+fail:
+ if (need_sig)
+ rte_vmbus_chan_signal_tx(txq->chan);
+
+ return nb_tx;
+}
+
+uint16_t
+hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct hn_rx_queue *rxq = prxq;
+ struct hn_data *hv = rxq->hv;
+
+ if (unlikely(hv->closed))
+ return 0;
+
+	/* If the staging ring holds fewer packets than requested, process more events */
+ if (rte_ring_count(rxq->rx_ring) < nb_pkts)
+ hn_process_events(hv, rxq->queue_id);
+
+ /* Get mbufs off staging ring */
+ return rte_ring_sc_dequeue_burst(rxq->rx_ring, (void **)rx_pkts,
+ nb_pkts, NULL);
+}
diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
new file mode 100644
index 00000000..f7ff8585
--- /dev/null
+++ b/drivers/net/netvsc/hn_var.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2009-2018 Microsoft Corp.
+ * Copyright (c) 2016 Brocade Communications Systems, Inc.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ */
+
+/*
+ * Tunable ethdev params
+ */
+#define HN_MIN_RX_BUF_SIZE 1024
+#define HN_MAX_XFER_LEN 2048
+#define HN_MAX_MAC_ADDRS 1
+#define HN_MAX_CHANNELS 64
+
+/* Claimed to be 12232B */
+#define HN_MTU_MAX (9 * 1024)
+
+/* Retry interval */
+#define HN_CHAN_INTERVAL_US 100
+
+/* Buffers need to be aligned */
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (PAGE_SIZE - 1)
+#endif
+
+struct hn_data;
+struct hn_txdesc;
+
+struct hn_stats {
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+ uint64_t nomemory;
+ uint64_t multicast;
+ uint64_t broadcast;
+ /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
+ uint64_t size_bins[8];
+};
+
+struct hn_tx_queue {
+ struct hn_data *hv;
+ struct vmbus_channel *chan;
+ uint16_t port_id;
+ uint16_t queue_id;
+ uint32_t free_thresh;
+
+ /* Applied packet transmission aggregation limits. */
+ uint32_t agg_szmax;
+ uint32_t agg_pktmax;
+ uint32_t agg_align;
+
+ /* Packet transmission aggregation states */
+ struct hn_txdesc *agg_txd;
+ uint32_t agg_pktleft;
+ uint32_t agg_szleft;
+ struct rndis_packet_msg *agg_prevpkt;
+
+ struct hn_stats stats;
+};
+
+struct hn_rx_queue {
+ struct hn_data *hv;
+ struct vmbus_channel *chan;
+ struct rte_mempool *mb_pool;
+ struct rte_ring *rx_ring;
+
+ rte_spinlock_t ring_lock;
+ uint32_t event_sz;
+ uint16_t port_id;
+ uint16_t queue_id;
+ struct hn_stats stats;
+ uint64_t ring_full;
+
+ uint8_t event_buf[];
+};
+
+/* multi-packet data from host */
+struct hn_rx_bufinfo {
+ struct vmbus_channel *chan;
+ struct hn_data *hv;
+ uint64_t xactid;
+ struct rte_mbuf_ext_shared_info shinfo;
+} __rte_cache_aligned;
+
+struct hn_data {
+ struct rte_vmbus_device *vmbus;
+ struct hn_rx_queue *primary;
+ uint16_t port_id;
+ bool closed;
+ uint32_t link_status;
+ uint32_t link_speed;
+
+ struct rte_mem_resource *rxbuf_res; /* UIO resource for Rx */
+ struct hn_rx_bufinfo *rxbuf_info;
+ uint32_t rxbuf_section_cnt; /* # of Rx sections */
+ volatile uint32_t rxbuf_outstanding;
+ uint16_t max_queues; /* Max available queues */
+ uint16_t num_queues;
+ uint64_t rss_offloads;
+
+ struct rte_mem_resource *chim_res; /* UIO resource for Tx */
+ struct rte_mempool *tx_pool; /* Tx descriptors */
+ uint32_t chim_szmax; /* Max size per buffer */
+ uint32_t chim_cnt; /* Max packets per buffer */
+
+ uint32_t nvs_ver;
+ uint32_t ndis_ver;
+ uint32_t rndis_agg_size;
+ uint32_t rndis_agg_pkts;
+ uint32_t rndis_agg_align;
+
+ volatile uint32_t rndis_pending;
+ rte_atomic32_t rndis_req_id;
+ uint8_t rndis_resp[256];
+
+ struct ether_addr mac_addr;
+ struct vmbus_channel *channels[HN_MAX_CHANNELS];
+};
+
+static inline struct vmbus_channel *
+hn_primary_chan(const struct hn_data *hv)
+{
+ return hv->channels[0];
+}
+
+void hn_process_events(struct hn_data *hv, uint16_t queue_id);
+
+uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+int hn_tx_pool_init(struct rte_eth_dev *dev);
+int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void hn_dev_tx_queue_release(void *arg);
+void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_txq_info *qinfo);
+
+struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
+ uint16_t queue_id,
+ unsigned int socket_id);
+int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+void hn_dev_rx_queue_release(void *arg);
+void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_rxq_info *qinfo);
diff --git a/drivers/net/netvsc/meson.build b/drivers/net/netvsc/meson.build
new file mode 100644
index 00000000..a717cdd4
--- /dev/null
+++ b/drivers/net/netvsc/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Microsoft Corporation
+
+build = dpdk_conf.has('RTE_LIBRTE_VMBUS_BUS')
+version = 2
+sources = files('hn_ethdev.c', 'hn_rxtx.c', 'hn_rndis.c', 'hn_nvs.c')
+
+deps += ['bus_vmbus']
+
+allow_experimental_apis = true
diff --git a/drivers/net/netvsc/ndis.h b/drivers/net/netvsc/ndis.h
new file mode 100644
index 00000000..2e7ca99b
--- /dev/null
+++ b/drivers/net/netvsc/ndis.h
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * All rights reserved.
+ */
+
+#ifndef _NET_NDIS_H_
+#define _NET_NDIS_H_
+
+#define NDIS_MEDIA_STATE_CONNECTED 0
+#define NDIS_MEDIA_STATE_DISCONNECTED 1
+
+#define NDIS_NETCHANGE_TYPE_POSSIBLE 1
+#define NDIS_NETCHANGE_TYPE_DEFINITE 2
+#define NDIS_NETCHANGE_TYPE_FROMMEDIA 3
+
+#define NDIS_OFFLOAD_SET_NOCHG 0
+#define NDIS_OFFLOAD_SET_ON 1
+#define NDIS_OFFLOAD_SET_OFF 2
+
+/* a.k.a GRE MAC */
+#define NDIS_ENCAP_TYPE_NVGRE 0x00000001
+
+#define NDIS_HASH_FUNCTION_MASK 0x000000FF /* see hash function */
+#define NDIS_HASH_TYPE_MASK 0x00FFFF00 /* see hash type */
+
+/* hash function */
+#define NDIS_HASH_FUNCTION_TOEPLITZ 0x00000001
+
+/* hash type */
+#define NDIS_HASH_IPV4 0x00000100
+#define NDIS_HASH_TCP_IPV4 0x00000200
+#define NDIS_HASH_IPV6 0x00000400
+#define NDIS_HASH_IPV6_EX 0x00000800
+#define NDIS_HASH_TCP_IPV6 0x00001000
+#define NDIS_HASH_TCP_IPV6_EX 0x00002000
+
+#define NDIS_HASH_KEYSIZE_TOEPLITZ 40
+#define NDIS_HASH_INDCNT 128
+
+#define NDIS_OBJTYPE_DEFAULT 0x80
+#define NDIS_OBJTYPE_RSS_CAPS 0x88
+#define NDIS_OBJTYPE_RSS_PARAMS 0x89
+#define NDIS_OBJTYPE_OFFLOAD 0xa7
+
+struct ndis_object_hdr {
+ uint8_t ndis_type; /* NDIS_OBJTYPE_ */
+ uint8_t ndis_rev; /* type specific */
+ uint16_t ndis_size; /* incl. this hdr */
+} __rte_packed;
+
+/*
+ * OID_TCP_OFFLOAD_PARAMETERS
+ * ndis_type: NDIS_OBJTYPE_DEFAULT
+ */
+struct ndis_offload_params {
+ struct ndis_object_hdr ndis_hdr;
+ uint8_t ndis_ip4csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_tcp4csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_udp4csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_tcp6csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_udp6csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_lsov1; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_ipsecv1; /* NDIS_OFFLOAD_IPSECV1_ */
+ uint8_t ndis_lsov2_ip4; /* NDIS_OFFLOAD_LSOV2_ */
+ uint8_t ndis_lsov2_ip6; /* NDIS_OFFLOAD_LSOV2_ */
+ uint8_t ndis_tcp4conn; /* 0 */
+ uint8_t ndis_tcp6conn; /* 0 */
+ uint32_t ndis_flags; /* 0 */
+ /* NDIS >= 6.1 */
+ uint8_t ndis_ipsecv2; /* NDIS_OFFLOAD_IPSECV2_ */
+ uint8_t ndis_ipsecv2_ip4;/* NDIS_OFFLOAD_IPSECV2_ */
+ /* NDIS >= 6.30 */
+ uint8_t ndis_rsc_ip4; /* NDIS_OFFLOAD_RSC_ */
+ uint8_t ndis_rsc_ip6; /* NDIS_OFFLOAD_RSC_ */
+ uint8_t ndis_encap; /* NDIS_OFFLOAD_SET_ */
+ uint8_t ndis_encap_types;/* NDIS_ENCAP_TYPE_ */
+};
+
+#define NDIS_OFFLOAD_PARAMS_SIZE sizeof(struct ndis_offload_params)
+#define NDIS_OFFLOAD_PARAMS_SIZE_6_1 \
+ offsetof(struct ndis_offload_params, ndis_rsc_ip4)
+
+#define NDIS_OFFLOAD_PARAMS_REV_2 2 /* NDIS 6.1 */
+#define NDIS_OFFLOAD_PARAMS_REV_3 3 /* NDIS 6.30 */
+
+#define NDIS_OFFLOAD_PARAM_NOCHG 0 /* common */
+#define NDIS_OFFLOAD_PARAM_OFF 1
+#define NDIS_OFFLOAD_PARAM_TX 2
+#define NDIS_OFFLOAD_PARAM_RX 3
+#define NDIS_OFFLOAD_PARAM_TXRX 4
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_LSOV1_OFF 1
+#define NDIS_OFFLOAD_LSOV1_ON 2
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_IPSECV1_OFF 1
+#define NDIS_OFFLOAD_IPSECV1_AH 2
+#define NDIS_OFFLOAD_IPSECV1_ESP 3
+#define NDIS_OFFLOAD_IPSECV1_AH_ESP 4
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_LSOV2_OFF 1
+#define NDIS_OFFLOAD_LSOV2_ON 2
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_IPSECV2_OFF 1
+#define NDIS_OFFLOAD_IPSECV2_AH 2
+#define NDIS_OFFLOAD_IPSECV2_ESP 3
+#define NDIS_OFFLOAD_IPSECV2_AH_ESP 4
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_RSC_OFF 1
+#define NDIS_OFFLOAD_RSC_ON 2
+
+/*
+ * OID_GEN_RECEIVE_SCALE_CAPABILITIES
+ * ndis_type: NDIS_OBJTYPE_RSS_CAPS
+ */
+struct ndis_rss_caps {
+ struct ndis_object_hdr ndis_hdr;
+ uint32_t ndis_caps; /* NDIS_RSS_CAP_ */
+ uint32_t ndis_nmsi; /* # of MSIs */
+ uint32_t ndis_nrxr; /* # of RX rings */
+ /* NDIS >= 6.30 */
+ uint16_t ndis_nind; /* # of indtbl ent. */
+ uint16_t ndis_pad;
+} __rte_packed;
+
+#define NDIS_RSS_CAPS_SIZE \
+ offsetof(struct ndis_rss_caps, ndis_pad)
+#define NDIS_RSS_CAPS_SIZE_6_0 \
+ offsetof(struct ndis_rss_caps, ndis_nind)
+
+#define NDIS_RSS_CAPS_REV_1 1 /* NDIS 6.{0,1,20} */
+#define NDIS_RSS_CAPS_REV_2 2 /* NDIS 6.30 */
+
+#define NDIS_RSS_CAP_MSI 0x01000000
+#define NDIS_RSS_CAP_CLASSIFY_ISR 0x02000000
+#define NDIS_RSS_CAP_CLASSIFY_DPC 0x04000000
+#define NDIS_RSS_CAP_MSIX 0x08000000
+#define NDIS_RSS_CAP_IPV4 0x00000100
+#define NDIS_RSS_CAP_IPV6 0x00000200
+#define NDIS_RSS_CAP_IPV6_EX 0x00000400
+#define NDIS_RSS_CAP_HASH_TOEPLITZ NDIS_HASH_FUNCTION_TOEPLITZ
+#define NDIS_RSS_CAP_HASHFUNC_MASK NDIS_HASH_FUNCTION_MASK
+
+/*
+ * OID_GEN_RECEIVE_SCALE_PARAMETERS
+ * ndis_type: NDIS_OBJTYPE_RSS_PARAMS
+ */
+struct ndis_rss_params {
+ struct ndis_object_hdr ndis_hdr;
+ uint16_t ndis_flags; /* NDIS_RSS_FLAG_ */
+ uint16_t ndis_bcpu; /* base cpu 0 */
+ uint32_t ndis_hash; /* NDIS_HASH_ */
+ uint16_t ndis_indsize; /* indirect table */
+ uint32_t ndis_indoffset;
+ uint16_t ndis_keysize; /* hash key */
+ uint32_t ndis_keyoffset;
+ /* NDIS >= 6.20 */
+ uint32_t ndis_cpumaskoffset;
+ uint32_t ndis_cpumaskcnt;
+ uint32_t ndis_cpumaskentsz;
+};
+
+#define NDIS_RSS_PARAMS_SIZE sizeof(struct ndis_rss_params)
+#define NDIS_RSS_PARAMS_SIZE_6_0 \
+ offsetof(struct ndis_rss_params, ndis_cpumaskoffset)
+
+#define NDIS_RSS_PARAMS_REV_1 1 /* NDIS 6.0 */
+#define NDIS_RSS_PARAMS_REV_2 2 /* NDIS 6.20 */
+
+#define NDIS_RSS_FLAG_NONE 0x0000
+#define NDIS_RSS_FLAG_BCPU_UNCHG 0x0001
+#define NDIS_RSS_FLAG_HASH_UNCHG 0x0002
+#define NDIS_RSS_FLAG_IND_UNCHG 0x0004
+#define NDIS_RSS_FLAG_KEY_UNCHG 0x0008
+#define NDIS_RSS_FLAG_DISABLE 0x0010
+
+/* non-standard convenient struct */
+struct ndis_rssprm_toeplitz {
+ struct ndis_rss_params rss_params;
+ /* Indirect table */
+ uint32_t rss_ind[NDIS_HASH_INDCNT];
+ /* Toeplitz hash key */
+ uint8_t rss_key[NDIS_HASH_KEYSIZE_TOEPLITZ];
+};
+
+#define NDIS_RSSPRM_TOEPLITZ_SIZE(nind) \
+ offsetof(struct ndis_rssprm_toeplitz, rss_ind[nind])
+
+/*
+ * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES
+ * ndis_type: NDIS_OBJTYPE_OFFLOAD
+ */
+
+#define NDIS_OFFLOAD_ENCAP_NONE 0x0000
+#define NDIS_OFFLOAD_ENCAP_NULL 0x0001
+#define NDIS_OFFLOAD_ENCAP_8023 0x0002
+#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004
+#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008
+#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010
+
+struct ndis_csum_offload {
+ uint32_t ndis_ip4_txenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip4_txcsum;
+#define NDIS_TXCSUM_CAP_IP4OPT 0x001
+#define NDIS_TXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP4 0x010
+#define NDIS_TXCSUM_CAP_UDP4 0x040
+#define NDIS_TXCSUM_CAP_IP4 0x100
+ uint32_t ndis_ip4_rxenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip4_rxcsum;
+#define NDIS_RXCSUM_CAP_IP4OPT 0x001
+#define NDIS_RXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP4 0x010
+#define NDIS_RXCSUM_CAP_UDP4 0x040
+#define NDIS_RXCSUM_CAP_IP4 0x100
+ uint32_t ndis_ip6_txenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip6_txcsum;
+#define NDIS_TXCSUM_CAP_IP6EXT 0x001
+#define NDIS_TXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP6 0x010
+#define NDIS_TXCSUM_CAP_UDP6 0x040
+ uint32_t ndis_ip6_rxenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip6_rxcsum;
+#define NDIS_RXCSUM_CAP_IP6EXT 0x001
+#define NDIS_RXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP6 0x010
+#define NDIS_RXCSUM_CAP_UDP6 0x040
+};
+
+struct ndis_lsov1_offload {
+ uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_maxsize;
+ uint32_t ndis_minsegs;
+ uint32_t ndis_opts;
+};
+
+struct ndis_ipsecv1_offload {
+ uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ah_esp;
+ uint32_t ndis_xport_tun;
+ uint32_t ndis_ip4_opts;
+ uint32_t ndis_flags;
+ uint32_t ndis_ip4_ah;
+ uint32_t ndis_ip4_esp;
+};
+
+struct ndis_lsov2_offload {
+ uint32_t ndis_ip4_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip4_maxsz;
+ uint32_t ndis_ip4_minsg;
+ uint32_t ndis_ip6_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip6_maxsz;
+ uint32_t ndis_ip6_minsg;
+ uint32_t ndis_ip6_opts;
+#define NDIS_LSOV2_CAP_IP6EXT 0x001
+#define NDIS_LSOV2_CAP_TCP6OPT 0x004
+};
+
+struct ndis_ipsecv2_offload {
+ uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint16_t ndis_ip6;
+ uint16_t ndis_ip4opt;
+ uint16_t ndis_ip6ext;
+ uint16_t ndis_ah;
+ uint16_t ndis_esp;
+ uint16_t ndis_ah_esp;
+ uint16_t ndis_xport;
+ uint16_t ndis_tun;
+ uint16_t ndis_xport_tun;
+ uint16_t ndis_lso;
+ uint16_t ndis_extseq;
+ uint32_t ndis_udp_esp;
+ uint32_t ndis_auth;
+ uint32_t ndis_crypto;
+ uint32_t ndis_sa_caps;
+};
+
+struct ndis_rsc_offload {
+ uint16_t ndis_ip4;
+ uint16_t ndis_ip6;
+};
+
+struct ndis_encap_offload {
+ uint32_t ndis_flags;
+ uint32_t ndis_maxhdr;
+};
+
+struct ndis_offload {
+ struct ndis_object_hdr ndis_hdr;
+ struct ndis_csum_offload ndis_csum;
+ struct ndis_lsov1_offload ndis_lsov1;
+ struct ndis_ipsecv1_offload ndis_ipsecv1;
+ struct ndis_lsov2_offload ndis_lsov2;
+ uint32_t ndis_flags;
+ /* NDIS >= 6.1 */
+ struct ndis_ipsecv2_offload ndis_ipsecv2;
+ /* NDIS >= 6.30 */
+ struct ndis_rsc_offload ndis_rsc;
+ struct ndis_encap_offload ndis_encap_gre;
+};
+
+#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload)
+#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ndis_ipsecv2)
+#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, ndis_rsc)
+
+#define NDIS_OFFLOAD_REV_1 1 /* NDIS 6.0 */
+#define NDIS_OFFLOAD_REV_2 2 /* NDIS 6.1 */
+#define NDIS_OFFLOAD_REV_3 3 /* NDIS 6.30 */
+
+/*
+ * Per-packet-info
+ */
+
+/* VLAN */
+#define NDIS_VLAN_INFO_SIZE sizeof(uint32_t)
+#define NDIS_VLAN_INFO_PRI_MASK 0x0007
+#define NDIS_VLAN_INFO_CFI_MASK 0x0008
+#define NDIS_VLAN_INFO_ID_MASK 0xfff0
+#define NDIS_VLAN_INFO_MAKE(id, pri, cfi) \
+ (((pri) & NDIS_VLAN_INFO_PRI_MASK) | \
+ (((cfi) & 0x1) << 3) | (((id) & 0xfff) << 4))
+#define NDIS_VLAN_INFO_ID(inf) (((inf) & NDIS_VLAN_INFO_ID_MASK) >> 4)
+#define NDIS_VLAN_INFO_CFI(inf) (((inf) & NDIS_VLAN_INFO_CFI_MASK) >> 3)
+#define NDIS_VLAN_INFO_PRI(inf) ((inf) & NDIS_VLAN_INFO_PRI_MASK)
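These macros pack the 802.1Q priority, CFI and VLAN ID into the low 16 bits; a quick round-trip with illustrative values:

	/* NDIS_VLAN_INFO_MAKE(100, 3, 0) == 3 | (0 << 3) | (100 << 4)
	 *                                == 0x643
	 * NDIS_VLAN_INFO_ID(0x643)  == 100
	 * NDIS_VLAN_INFO_CFI(0x643) == 0
	 * NDIS_VLAN_INFO_PRI(0x643) == 3
	 */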
+
+/* Reception checksum */
+#define NDIS_RXCSUM_INFO_SIZE sizeof(uint32_t)
+#define NDIS_RXCSUM_INFO_TCPCS_FAILED 0x0001
+#define NDIS_RXCSUM_INFO_UDPCS_FAILED 0x0002
+#define NDIS_RXCSUM_INFO_IPCS_FAILED 0x0004
+#define NDIS_RXCSUM_INFO_TCPCS_OK 0x0008
+#define NDIS_RXCSUM_INFO_UDPCS_OK 0x0010
+#define NDIS_RXCSUM_INFO_IPCS_OK 0x0020
+#define NDIS_RXCSUM_INFO_LOOPBACK 0x0040
+#define NDIS_RXCSUM_INFO_TCPCS_INVAL 0x0080
+#define NDIS_RXCSUM_INFO_IPCS_INVAL 0x0100
+
+/* LSOv2 */
+#define NDIS_LSO2_INFO_SIZE sizeof(uint32_t)
+#define NDIS_LSO2_INFO_MSS_MASK 0x000fffff
+#define NDIS_LSO2_INFO_THOFF_MASK 0x3ff00000
+#define NDIS_LSO2_INFO_ISLSO2 0x40000000
+#define NDIS_LSO2_INFO_ISIPV6 0x80000000
+
+#define NDIS_LSO2_INFO_MAKE(thoff, mss) \
+ ((((uint32_t)(mss)) & NDIS_LSO2_INFO_MSS_MASK) | \
+ ((((uint32_t)(thoff)) & 0x3ff) << 20) | \
+ NDIS_LSO2_INFO_ISLSO2)
+
+#define NDIS_LSO2_INFO_MAKEIPV4(thoff, mss) \
+ NDIS_LSO2_INFO_MAKE((thoff), (mss))
+
+#define NDIS_LSO2_INFO_MAKEIPV6(thoff, mss) \
+ (NDIS_LSO2_INFO_MAKE((thoff), (mss)) | NDIS_LSO2_INFO_ISIPV6)
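As a sanity check of the bit layout (the MSS occupies the low 20 bits, the transport-header offset bits 20-29), using an illustrative header offset of 54 and MSS of 1448:

	/* NDIS_LSO2_INFO_MAKEIPV4(54, 1448)
	 *   == 1448 | (54 << 20) | NDIS_LSO2_INFO_ISLSO2
	 *   == 0x436005a8
	 * The IPv6 variant also sets NDIS_LSO2_INFO_ISIPV6 (0x80000000),
	 * giving 0xc36005a8.
	 */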
+
+/* Transmission checksum */
+#define NDIS_TXCSUM_INFO_SIZE sizeof(uint32_t)
+#define NDIS_TXCSUM_INFO_IPV4 0x00000001
+#define NDIS_TXCSUM_INFO_IPV6 0x00000002
+#define NDIS_TXCSUM_INFO_TCPCS 0x00000004
+#define NDIS_TXCSUM_INFO_UDPCS 0x00000008
+#define NDIS_TXCSUM_INFO_IPCS 0x00000010
+#define NDIS_TXCSUM_INFO_THOFF 0x03ff0000
+
+#define NDIS_TXCSUM_INFO_MKL4CS(thoff, flag) \
+ ((((uint32_t)(thoff)) << 16) | (flag))
+
+#define NDIS_TXCSUM_INFO_MKTCPCS(thoff) \
+ NDIS_TXCSUM_INFO_MKL4CS((thoff), NDIS_TXCSUM_INFO_TCPCS)
+
+#define NDIS_TXCSUM_INFO_MKUDPCS(thoff) \
+ NDIS_TXCSUM_INFO_MKL4CS((thoff), NDIS_TXCSUM_INFO_UDPCS)
+
+#endif /* !_NET_NDIS_H_ */
diff --git a/drivers/net/netvsc/rndis.h b/drivers/net/netvsc/rndis.h
new file mode 100644
index 00000000..eac9a99f
--- /dev/null
+++ b/drivers/net/netvsc/rndis.h
@@ -0,0 +1,414 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * Copyright (c) 2010 Jonathan Armani <armani@openbsd.org>
+ * Copyright (c) 2010 Fabien Romano <fabien@openbsd.org>
+ * Copyright (c) 2010 Michael Knudsen <mk@openbsd.org>
+ * All rights reserved.
+ */
+
+#ifndef _NET_RNDIS_H_
+#define _NET_RNDIS_H_
+
+/* Canonical major/minor version as of 22nd Aug. 2016. */
+#define RNDIS_VERSION_MAJOR 0x00000001
+#define RNDIS_VERSION_MINOR 0x00000000
+
+#define RNDIS_STATUS_SUCCESS 0x00000000
+#define RNDIS_STATUS_PENDING 0x00000103
+
+#define RNDIS_STATUS_ONLINE 0x40010003
+#define RNDIS_STATUS_RESET_START 0x40010004
+#define RNDIS_STATUS_RESET_END 0x40010005
+#define RNDIS_STATUS_RING_STATUS 0x40010006
+#define RNDIS_STATUS_CLOSED 0x40010007
+#define RNDIS_STATUS_WAN_LINE_UP 0x40010008
+#define RNDIS_STATUS_WAN_LINE_DOWN 0x40010009
+#define RNDIS_STATUS_WAN_FRAGMENT 0x4001000A
+#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000B
+#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000C
+#define RNDIS_STATUS_HARDWARE_LINE_UP 0x4001000D
+#define RNDIS_STATUS_HARDWARE_LINE_DOWN 0x4001000E
+#define RNDIS_STATUS_INTERFACE_UP 0x4001000F
+#define RNDIS_STATUS_INTERFACE_DOWN 0x40010010
+#define RNDIS_STATUS_MEDIA_BUSY 0x40010011
+#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION 0x40010012
+#define RNDIS_STATUS_WW_INDICATION RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION
+#define RNDIS_STATUS_LINK_SPEED_CHANGE 0x40010013
+#define RNDIS_STATUS_NETWORK_CHANGE 0x40010018
+#define RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG 0x40020006
+
+#define RNDIS_STATUS_FAILURE 0xC0000001
+#define RNDIS_STATUS_RESOURCES 0xC000009A
+#define RNDIS_STATUS_NOT_SUPPORTED 0xC00000BB
+#define RNDIS_STATUS_CLOSING 0xC0010002
+#define RNDIS_STATUS_BAD_VERSION 0xC0010004
+#define RNDIS_STATUS_BAD_CHARACTERISTICS 0xC0010005
+#define RNDIS_STATUS_ADAPTER_NOT_FOUND 0xC0010006
+#define RNDIS_STATUS_OPEN_FAILED 0xC0010007
+#define RNDIS_STATUS_DEVICE_FAILED 0xC0010008
+#define RNDIS_STATUS_MULTICAST_FULL 0xC0010009
+#define RNDIS_STATUS_MULTICAST_EXISTS 0xC001000A
+#define RNDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B
+#define RNDIS_STATUS_REQUEST_ABORTED 0xC001000C
+#define RNDIS_STATUS_RESET_IN_PROGRESS 0xC001000D
+#define RNDIS_STATUS_CLOSING_INDICATING 0xC001000E
+#define RNDIS_STATUS_INVALID_PACKET 0xC001000F
+#define RNDIS_STATUS_OPEN_LIST_FULL 0xC0010010
+#define RNDIS_STATUS_ADAPTER_NOT_READY 0xC0010011
+#define RNDIS_STATUS_ADAPTER_NOT_OPEN 0xC0010012
+#define RNDIS_STATUS_NOT_INDICATING 0xC0010013
+#define RNDIS_STATUS_INVALID_LENGTH 0xC0010014
+#define RNDIS_STATUS_INVALID_DATA 0xC0010015
+#define RNDIS_STATUS_BUFFER_TOO_SHORT 0xC0010016
+#define RNDIS_STATUS_INVALID_OID 0xC0010017
+#define RNDIS_STATUS_ADAPTER_REMOVED 0xC0010018
+#define RNDIS_STATUS_UNSUPPORTED_MEDIA 0xC0010019
+#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE 0xC001001A
+#define RNDIS_STATUS_FILE_NOT_FOUND 0xC001001B
+#define RNDIS_STATUS_ERROR_READING_FILE 0xC001001C
+#define RNDIS_STATUS_ALREADY_MAPPED 0xC001001D
+#define RNDIS_STATUS_RESOURCE_CONFLICT 0xC001001E
+#define RNDIS_STATUS_NO_CABLE 0xC001001F
+
+#define OID_GEN_SUPPORTED_LIST 0x00010101
+#define OID_GEN_HARDWARE_STATUS 0x00010102
+#define OID_GEN_MEDIA_SUPPORTED 0x00010103
+#define OID_GEN_MEDIA_IN_USE 0x00010104
+#define OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105
+#define OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106
+#define OID_GEN_LINK_SPEED 0x00010107
+#define OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108
+#define OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109
+#define OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A
+#define OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B
+#define OID_GEN_VENDOR_ID 0x0001010C
+#define OID_GEN_VENDOR_DESCRIPTION 0x0001010D
+#define OID_GEN_CURRENT_PACKET_FILTER 0x0001010E
+#define OID_GEN_CURRENT_LOOKAHEAD 0x0001010F
+#define OID_GEN_DRIVER_VERSION 0x00010110
+#define OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111
+#define OID_GEN_PROTOCOL_OPTIONS 0x00010112
+#define OID_GEN_MAC_OPTIONS 0x00010113
+#define OID_GEN_MEDIA_CONNECT_STATUS 0x00010114
+#define OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115
+#define OID_GEN_VENDOR_DRIVER_VERSION 0x00010116
+#define OID_GEN_SUPPORTED_GUIDS 0x00010117
+#define OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118
+#define OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119
+#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203
+#define OID_GEN_RECEIVE_SCALE_PARAMETERS 0x00010204
+#define OID_GEN_MACHINE_NAME 0x0001021A
+#define OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B
+#define OID_GEN_VLAN_ID 0x0001021C
+
+#define OID_802_3_PERMANENT_ADDRESS 0x01010101
+#define OID_802_3_CURRENT_ADDRESS 0x01010102
+#define OID_802_3_MULTICAST_LIST 0x01010103
+#define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104
+#define OID_802_3_MAC_OPTIONS 0x01010105
+#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
+#define OID_802_3_XMIT_ONE_COLLISION 0x01020102
+#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
+#define OID_802_3_XMIT_DEFERRED 0x01020201
+#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
+#define OID_802_3_RCV_OVERRUN 0x01020203
+#define OID_802_3_XMIT_UNDERRUN 0x01020204
+#define OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205
+#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
+#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
+
+#define OID_TCP_OFFLOAD_PARAMETERS 0xFC01020C
+#define OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020D
+
+#define RNDIS_MEDIUM_802_3 0x00000000
+
+/* Device flags */
+#define RNDIS_DF_CONNECTIONLESS 0x00000001
+#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002
+
+/*
+ * Common RNDIS message header.
+ */
+struct rndis_msghdr {
+ uint32_t type;
+ uint32_t len;
+};
+
+/*
+ * RNDIS data message
+ */
+#define RNDIS_PACKET_MSG 0x00000001
+
+struct rndis_packet_msg {
+ uint32_t type;
+ uint32_t len;
+ uint32_t dataoffset;
+ uint32_t datalen;
+ uint32_t oobdataoffset;
+ uint32_t oobdatalen;
+ uint32_t oobdataelements;
+ uint32_t pktinfooffset;
+ uint32_t pktinfolen;
+ uint32_t vchandle;
+ uint32_t reserved;
+};
+
+/*
+ * Minimum value for dataoffset, oobdataoffset, and
+ * pktinfooffset.
+ */
+#define RNDIS_PACKET_MSG_OFFSET_MIN \
+ (sizeof(struct rndis_packet_msg) - \
+ offsetof(struct rndis_packet_msg, dataoffset))
+
+/* Convert a dataoffset-relative offset to one from the beginning of rndis_packet_msg. */
+#define RNDIS_PACKET_MSG_OFFSET_ABS(ofs) \
+ ((ofs) + offsetof(struct rndis_packet_msg, dataoffset))
+
+#define RNDIS_PACKET_MSG_OFFSET_ALIGN 4
+#define RNDIS_PACKET_MSG_OFFSET_ALIGNMASK \
+ (RNDIS_PACKET_MSG_OFFSET_ALIGN - 1)
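Offsets in the packet message are encoded relative to the dataoffset field, so plugging the struct sizes into the two macros:

	/* sizeof(struct rndis_packet_msg)               == 44 (11 x uint32_t)
	 * offsetof(struct rndis_packet_msg, dataoffset) == 8
	 *
	 * RNDIS_PACKET_MSG_OFFSET_MIN     == 44 - 8 == 36
	 * RNDIS_PACKET_MSG_OFFSET_ABS(36) == 36 + 8 == 44
	 *
	 * i.e. the smallest legal encoded offset points just past the
	 * header, and ABS() converts an encoded value back to an offset
	 * from the start of the message.
	 */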
+
+/* Per-packet-info for RNDIS data message */
+struct rndis_pktinfo {
+ uint32_t size;
+ uint32_t type; /* NDIS_PKTINFO_TYPE_ */
+ uint32_t offset;
+ uint8_t data[];
+};
+
+#define RNDIS_PKTINFO_OFFSET \
+ offsetof(struct rndis_pktinfo, data[0])
+#define RNDIS_PKTINFO_SIZE_ALIGN 4
+#define RNDIS_PKTINFO_SIZE_ALIGNMASK (RNDIS_PKTINFO_SIZE_ALIGN - 1)
+
+#define NDIS_PKTINFO_TYPE_CSUM 0
+#define NDIS_PKTINFO_TYPE_IPSEC 1
+#define NDIS_PKTINFO_TYPE_LSO 2
+#define NDIS_PKTINFO_TYPE_CLASSIFY 3
+/* reserved 4 */
+#define NDIS_PKTINFO_TYPE_SGLIST 5
+#define NDIS_PKTINFO_TYPE_VLAN 6
+#define NDIS_PKTINFO_TYPE_ORIG 7
+#define NDIS_PKTINFO_TYPE_PKT_CANCELID 8
+#define NDIS_PKTINFO_TYPE_ORIG_NBLIST 9
+#define NDIS_PKTINFO_TYPE_CACHE_NBLIST 10
+#define NDIS_PKTINFO_TYPE_PKT_PAD 11
+
+/* RNDIS extension */
+
+/* Per-packet hash info */
+#define NDIS_HASH_INFO_SIZE sizeof(uint32_t)
+#define NDIS_PKTINFO_TYPE_HASHINF NDIS_PKTINFO_TYPE_ORIG_NBLIST
+/* NDIS_HASH_ */
+
+/* Per-packet hash value */
+#define NDIS_HASH_VALUE_SIZE sizeof(uint32_t)
+#define NDIS_PKTINFO_TYPE_HASHVAL NDIS_PKTINFO_TYPE_PKT_CANCELID
+
+/* Per-packet-info size */
+#define RNDIS_PKTINFO_SIZE(dlen) offsetof(struct rndis_pktinfo, data[dlen])
+
+/*
+ * RNDIS control messages
+ */
+
+/*
+ * Common header for RNDIS completion messages.
+ *
+ * NOTE: It does not apply to RNDIS_RESET_CMPLT.
+ */
+struct rndis_comp_hdr {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+};
+
+/* Initialize the device. */
+#define RNDIS_INITIALIZE_MSG 0x00000002
+#define RNDIS_INITIALIZE_CMPLT 0x80000002
+
+struct rndis_init_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t ver_major;
+ uint32_t ver_minor;
+ uint32_t max_xfersz;
+};
+
+struct rndis_init_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+ uint32_t ver_major;
+ uint32_t ver_minor;
+ uint32_t devflags;
+ uint32_t medium;
+ uint32_t pktmaxcnt;
+ uint32_t pktmaxsz;
+ uint32_t align;
+ uint32_t aflistoffset;
+ uint32_t aflistsz;
+};
+
+#define RNDIS_INIT_COMP_SIZE_MIN \
+ offsetof(struct rndis_init_comp, aflistsz)
+
+/* Halt the device. No response sent. */
+#define RNDIS_HALT_MSG 0x00000003
+
+struct rndis_halt_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+};
+
+/* Send a query object. */
+#define RNDIS_QUERY_MSG 0x00000004
+#define RNDIS_QUERY_CMPLT 0x80000004
+
+struct rndis_query_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t oid;
+ uint32_t infobuflen;
+ uint32_t infobufoffset;
+ uint32_t devicevchdl;
+};
+
+#define RNDIS_QUERY_REQ_INFOBUFOFFSET \
+ (sizeof(struct rndis_query_req) - \
+ offsetof(struct rndis_query_req, rid))
+
+struct rndis_query_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+ uint32_t infobuflen;
+ uint32_t infobufoffset;
+};
+
+/* infobuf offset from the beginning of rndis_query_comp. */
+#define RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(ofs) \
+ ((ofs) + offsetof(struct rndis_query_comp, rid))
+
+/* Send a set object request. */
+#define RNDIS_SET_MSG 0x00000005
+#define RNDIS_SET_CMPLT 0x80000005
+
+struct rndis_set_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t oid;
+ uint32_t infobuflen;
+ uint32_t infobufoffset;
+ uint32_t devicevchdl;
+};
+
+#define RNDIS_SET_REQ_INFOBUFOFFSET \
+ (sizeof(struct rndis_set_req) - \
+ offsetof(struct rndis_set_req, rid))
+
+struct rndis_set_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+};
+
+/*
+ * Parameter used by OID_GEN_RNDIS_CONFIG_PARAMETER.
+ */
+#define RNDIS_SET_PARAM_NUMERIC 0x00000000
+#define RNDIS_SET_PARAM_STRING 0x00000002
+
+struct rndis_set_parameter {
+ uint32_t nameoffset;
+ uint32_t namelen;
+ uint32_t type;
+ uint32_t valueoffset;
+ uint32_t valuelen;
+};
+
+/* Perform a soft reset on the device. */
+#define RNDIS_RESET_MSG 0x00000006
+#define RNDIS_RESET_CMPLT 0x80000006
+
+struct rndis_reset_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+};
+
+struct rndis_reset_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t status;
+ uint32_t adrreset;
+};
+
+/* 802.3 link-state or undefined message error. Sent by device. */
+#define RNDIS_INDICATE_STATUS_MSG 0x00000007
+
+struct rndis_status_msg {
+ uint32_t type;
+ uint32_t len;
+ uint32_t status;
+ uint32_t stbuflen;
+ uint32_t stbufoffset;
+ /* rndis_diag_info */
+};
+
+/* stbuf offset from the beginning of rndis_status_msg. */
+#define RNDIS_STBUFOFFSET_ABS(ofs) \
+ ((ofs) + offsetof(struct rndis_status_msg, status))
+
+/*
+ * Immediately follows rndis_status_msg.stbufoffset when a control
+ * message is malformatted or a packet message contains inappropriate
+ * content.
+ */
+struct rndis_diag_info {
+ uint32_t diagstatus;
+ uint32_t erroffset;
+};
+
+/* Keepalive message. May be sent by device. */
+#define RNDIS_KEEPALIVE_MSG 0x00000008
+#define RNDIS_KEEPALIVE_CMPLT 0x80000008
+
+struct rndis_keepalive_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+};
+
+struct rndis_keepalive_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+};
+
+/* Packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */
+#define NDIS_PACKET_TYPE_NONE 0x00000000
+#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
+#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
+#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
+#define NDIS_PACKET_TYPE_BROADCAST 0x00000008
+#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010
+#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020
+#define NDIS_PACKET_TYPE_SMT 0x00000040
+#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080
+#define NDIS_PACKET_TYPE_GROUP 0x00001000
+#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00002000
+#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00004000
+#define NDIS_PACKET_TYPE_MAC_FRAME 0x00008000
+
+#endif /* !_NET_RNDIS_H_ */
diff --git a/drivers/net/netvsc/rte_pmd_netvsc_version.map b/drivers/net/netvsc/rte_pmd_netvsc_version.map
new file mode 100644
index 00000000..d534019a
--- /dev/null
+++ b/drivers/net/netvsc/rte_pmd_netvsc_version.map
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+DPDK_18.08 {
+ local: *;
+};
diff --git a/drivers/net/nfp/meson.build b/drivers/net/nfp/meson.build
new file mode 100644
index 00000000..3ba37e27
--- /dev/null
+++ b/drivers/net/nfp/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('nfpcore/nfp_cpp_pcie_ops.c',
+ 'nfpcore/nfp_nsp.c',
+ 'nfpcore/nfp_cppcore.c',
+ 'nfpcore/nfp_resource.c',
+ 'nfpcore/nfp_mip.c',
+ 'nfpcore/nfp_nffw.c',
+ 'nfpcore/nfp_rtsym.c',
+ 'nfpcore/nfp_nsp_cmds.c',
+ 'nfpcore/nfp_crc.c',
+ 'nfpcore/nfp_mutex.c',
+ 'nfpcore/nfp_nsp_eth.c',
+ 'nfpcore/nfp_hwinfo.c',
+ 'nfp_net.c')
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index faad1ee9..6e5e305f 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -411,8 +411,10 @@ nfp_net_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- /* Checking RX offloads */
- if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
+	/* KEEP_CRC offload flag is not supported by the PMD.
+	 * This block can be removed when DEV_RX_OFFLOAD_CRC_STRIP is removed.
+	 */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads))
PMD_INIT_LOG(INFO, "HW does strip CRC. No configurable!");
return 0;
@@ -1166,7 +1168,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_KEEP_CRC;
if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
@@ -1436,9 +1439,9 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if ((uint32_t)mtu > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
@@ -2253,11 +2256,15 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->wr_p = 0;
pkt_size -= dma_size;
- if (!pkt_size)
- /* End of packet */
- txds->offset_eop |= PCIE_DESC_TX_EOP;
+
+ /*
+ * Making the EOP, packets with just one segment
+ * the priority
+ */
+ if (likely(!pkt_size))
+ txds->offset_eop = PCIE_DESC_TX_EOP;
else
- txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
+ txds->offset_eop = 0;
pkt = pkt->next;
/* Referencing next free TX descriptor */
@@ -3126,9 +3133,9 @@ static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
* use a lock file if UIO is being used.
*/
if (dev->kdrv == RTE_KDRV_VFIO)
- cpp = nfp_cpp_from_device_name(dev->device.name, 0);
+ cpp = nfp_cpp_from_device_name(dev, 0);
else
- cpp = nfp_cpp_from_device_name(dev->device.name, 1);
+ cpp = nfp_cpp_from_device_name(dev, 1);
if (!cpp) {
PMD_DRV_LOG(ERR, "A CPP handle can not be obtained");
@@ -3277,9 +3284,7 @@ RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
-RTE_INIT(nfp_init_log);
-static void
-nfp_init_log(void)
+RTE_INIT(nfp_init_log)
{
nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
if (nfp_logtype_init >= 0)
diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
index b8541c59..d46574b1 100644
--- a/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
+++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
@@ -13,7 +13,6 @@
#include <stdlib.h>
#include <ctype.h>
#include <inttypes.h>
-#include <sys/cdefs.h>
#include <sys/stat.h>
#include <limits.h>
#include <errno.h>
diff --git a/drivers/net/nfp/nfpcore/nfp_cpp.h b/drivers/net/nfp/nfpcore/nfp_cpp.h
index de2ff84e..1427954c 100644
--- a/drivers/net/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/nfp/nfpcore/nfp_cpp.h
@@ -6,6 +6,8 @@
#ifndef __NFP_CPP_H__
#define __NFP_CPP_H__
+#include <rte_ethdev_pci.h>
+
#include "nfp-common/nfp_platform.h"
#include "nfp-common/nfp_resid.h"
@@ -54,7 +56,7 @@ struct nfp_cpp_operations {
size_t area_priv_size;
/* Instance an NFP CPP */
- int (*init)(struct nfp_cpp *cpp, const char *devname);
+ int (*init)(struct nfp_cpp *cpp, struct rte_pci_device *dev);
/*
* Free the bus.
@@ -181,7 +183,7 @@ uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp);
*
* @return NFP CPP handle, or NULL on failure (and set errno accordingly).
*/
-struct nfp_cpp *nfp_cpp_from_device_name(const char *devname,
+struct nfp_cpp *nfp_cpp_from_device_name(struct rte_pci_device *dev,
int driver_lock_needed);
/*
diff --git a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
index 2f5e7f6d..c68d9400 100644
--- a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
+++ b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
@@ -31,6 +31,7 @@
#include <sys/file.h>
#include <sys/stat.h>
+#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>
#include "nfp_cpp.h"
@@ -309,13 +310,8 @@ nfp_enable_bars(struct nfp_pcie_user *nfp)
bar->csr = nfp->cfg +
NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar->index >> 3,
bar->index & 7);
- bar->iomem =
- (char *)mmap(0, 1 << bar->bitsize, PROT_READ | PROT_WRITE,
- MAP_SHARED, nfp->device,
- bar->index << bar->bitsize);
- if (bar->iomem == MAP_FAILED)
- return (-ENOMEM);
+ bar->iomem = nfp->cfg + (bar->index << bar->bitsize);
}
return 0;
}
@@ -345,7 +341,6 @@ nfp_disable_bars(struct nfp_pcie_user *nfp)
for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) {
bar = &nfp->bar[x - 1];
if (bar->iomem) {
- munmap(bar->iomem, 1 << (nfp->barsz - 3));
bar->iomem = NULL;
bar->lock = 0;
}
@@ -639,61 +634,32 @@ nfp_acquire_process_lock(struct nfp_pcie_user *desc)
}
static int
-nfp6000_set_model(struct nfp_pcie_user *desc, struct nfp_cpp *cpp)
+nfp6000_set_model(struct rte_pci_device *dev, struct nfp_cpp *cpp)
{
- char tmp_str[80];
- uint32_t tmp;
- int fp;
-
- snprintf(tmp_str, sizeof(tmp_str), "%s/%s/config", PCI_DEVICES,
- desc->busdev);
-
- fp = open(tmp_str, O_RDONLY);
- if (!fp)
- return -1;
-
- lseek(fp, 0x2e, SEEK_SET);
+ uint32_t model;
- if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
- printf("Error reading config file for model\n");
+ if (rte_pci_read_config(dev, &model, 4, 0x2e) < 0) {
+ printf("nfp set model failed\n");
return -1;
}
- tmp = tmp << 16;
-
- if (close(fp) == -1)
- return -1;
-
- nfp_cpp_model_set(cpp, tmp);
+ model = model << 16;
+ nfp_cpp_model_set(cpp, model);
return 0;
}
static int
-nfp6000_set_interface(struct nfp_pcie_user *desc, struct nfp_cpp *cpp)
+nfp6000_set_interface(struct rte_pci_device *dev, struct nfp_cpp *cpp)
{
- char tmp_str[80];
- uint16_t tmp;
- int fp;
-
- snprintf(tmp_str, sizeof(tmp_str), "%s/%s/config", PCI_DEVICES,
- desc->busdev);
+ uint16_t interface;
- fp = open(tmp_str, O_RDONLY);
- if (!fp)
- return -1;
-
- lseek(fp, 0x154, SEEK_SET);
-
- if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
- printf("error reading config file for interface\n");
+ if (rte_pci_read_config(dev, &interface, 2, 0x154) < 0) {
+ printf("nfp set interface failed\n");
return -1;
}
- if (close(fp) == -1)
- return -1;
-
- nfp_cpp_interface_set(cpp, tmp);
+ nfp_cpp_interface_set(cpp, interface);
return 0;
}
@@ -704,7 +670,7 @@ nfp6000_set_interface(struct nfp_pcie_user *desc, struct nfp_cpp *cpp)
#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
#define PCI_EXT_CAP_ID_DSN 0x03
static int
-nfp_pci_find_next_ext_capability(int fp, int cap)
+nfp_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap)
{
uint32_t header;
int ttl;
@@ -713,9 +679,8 @@ nfp_pci_find_next_ext_capability(int fp, int cap)
/* minimum 8 bytes per capability */
ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
- lseek(fp, pos, SEEK_SET);
- if (read(fp, &header, sizeof(header)) != sizeof(header)) {
- printf("error reading config file for serial\n");
+ if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
+ printf("nfp error reading extended capabilities\n");
return -1;
}
@@ -734,9 +699,8 @@ nfp_pci_find_next_ext_capability(int fp, int cap)
if (pos < PCI_CFG_SPACE_SIZE)
break;
- lseek(fp, pos, SEEK_SET);
- if (read(fp, &header, sizeof(header)) != sizeof(header)) {
- printf("error reading config file for serial\n");
+ if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
+ printf("nfp error reading extended capabilities\n");
return -1;
}
}
@@ -745,99 +709,70 @@ nfp_pci_find_next_ext_capability(int fp, int cap)
}
static int
-nfp6000_set_serial(struct nfp_pcie_user *desc, struct nfp_cpp *cpp)
+nfp6000_set_serial(struct rte_pci_device *dev, struct nfp_cpp *cpp)
{
- char tmp_str[80];
uint16_t tmp;
uint8_t serial[6];
int serial_len = 6;
- int fp, pos;
+ int pos;
- snprintf(tmp_str, sizeof(tmp_str), "%s/%s/config", PCI_DEVICES,
- desc->busdev);
-
- fp = open(tmp_str, O_RDONLY);
- if (!fp)
- return -1;
-
- pos = nfp_pci_find_next_ext_capability(fp, PCI_EXT_CAP_ID_DSN);
+ pos = nfp_pci_find_next_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
if (pos <= 0) {
- printf("PCI_EXT_CAP_ID_DSN not found. Using default offset\n");
- lseek(fp, 0x156, SEEK_SET);
+ printf("PCI_EXT_CAP_ID_DSN not found. nfp set serial failed\n");
+ return -1;
} else {
- lseek(fp, pos + 6, SEEK_SET);
+ pos += 6;
}
- if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
- printf("error reading config file for serial\n");
+ if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) {
+ printf("nfp set serial failed\n");
return -1;
}
serial[4] = (uint8_t)((tmp >> 8) & 0xff);
serial[5] = (uint8_t)(tmp & 0xff);
- if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
- printf("error reading config file for serial\n");
+ pos += 2;
+ if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) {
+ printf("nfp set serial failed\n");
return -1;
}
serial[2] = (uint8_t)((tmp >> 8) & 0xff);
serial[3] = (uint8_t)(tmp & 0xff);
- if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
- printf("error reading config file for serial\n");
+ pos += 2;
+ if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) {
+ printf("nfp set serial failed\n");
return -1;
}
serial[0] = (uint8_t)((tmp >> 8) & 0xff);
serial[1] = (uint8_t)(tmp & 0xff);
- if (close(fp) == -1)
- return -1;
-
nfp_cpp_serial_set(cpp, serial, serial_len);
return 0;
}
static int
-nfp6000_set_barsz(struct nfp_pcie_user *desc)
+nfp6000_set_barsz(struct rte_pci_device *dev, struct nfp_pcie_user *desc)
{
- char tmp_str[80];
- unsigned long start, end, flags, tmp;
- int i;
- FILE *fp;
-
- snprintf(tmp_str, sizeof(tmp_str), "%s/%s/resource", PCI_DEVICES,
- desc->busdev);
-
- fp = fopen(tmp_str, "r");
- if (!fp)
- return -1;
+ unsigned long tmp;
+ int i = 0;
- if (fscanf(fp, "0x%lx 0x%lx 0x%lx", &start, &end, &flags) == 0) {
- printf("error reading resource file for bar size\n");
- fclose(fp);
- return -1;
- }
+ tmp = dev->mem_resource[0].len;
- if (fclose(fp) == -1)
- return -1;
-
- tmp = (end - start) + 1;
- i = 0;
while (tmp >>= 1)
i++;
+
desc->barsz = i;
return 0;
}
static int
-nfp6000_init(struct nfp_cpp *cpp, const char *devname)
+nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev)
{
- char link[120];
- char tmp_str[80];
- ssize_t size;
int ret = 0;
uint32_t model;
struct nfp_pcie_user *desc;
@@ -848,7 +783,7 @@ nfp6000_init(struct nfp_cpp *cpp, const char *devname)
memset(desc->busdev, 0, BUSDEV_SZ);
- strlcpy(desc->busdev, devname, sizeof(desc->busdev));
+ strlcpy(desc->busdev, dev->device.name, sizeof(desc->busdev));
if (cpp->driver_lock_needed) {
ret = nfp_acquire_process_lock(desc);
@@ -856,39 +791,16 @@ nfp6000_init(struct nfp_cpp *cpp, const char *devname)
return -1;
}
- snprintf(tmp_str, sizeof(tmp_str), "%s/%s/driver", PCI_DEVICES,
- desc->busdev);
-
- size = readlink(tmp_str, link, sizeof(link));
-
- if (size == -1)
- tmp_str[0] = '\0';
-
- if (size == sizeof(link))
- tmp_str[0] = '\0';
-
- snprintf(tmp_str, sizeof(tmp_str), "%s/%s/resource0", PCI_DEVICES,
- desc->busdev);
-
- desc->device = open(tmp_str, O_RDWR);
- if (desc->device == -1)
- return -1;
-
- if (nfp6000_set_model(desc, cpp) < 0)
+ if (nfp6000_set_model(dev, cpp) < 0)
return -1;
- if (nfp6000_set_interface(desc, cpp) < 0)
+ if (nfp6000_set_interface(dev, cpp) < 0)
return -1;
- if (nfp6000_set_serial(desc, cpp) < 0)
+ if (nfp6000_set_serial(dev, cpp) < 0)
return -1;
- if (nfp6000_set_barsz(desc) < 0)
+ if (nfp6000_set_barsz(dev, desc) < 0)
return -1;
- desc->cfg = (char *)mmap(0, 1 << (desc->barsz - 3),
- PROT_READ | PROT_WRITE,
- MAP_SHARED, desc->device, 0);
-
- if (desc->cfg == MAP_FAILED)
- return -1;
+ desc->cfg = (char *)dev->mem_resource[0].addr;
nfp_enable_bars(desc);
@@ -904,16 +816,8 @@ static void
nfp6000_free(struct nfp_cpp *cpp)
{
struct nfp_pcie_user *desc = nfp_cpp_priv(cpp);
- int x;
- /* Unmap may cause if there are any pending transaxctions */
nfp_disable_bars(desc);
- munmap(desc->cfg, 1 << (desc->barsz - 3));
-
- for (x = ARRAY_SIZE(desc->bar); x > 0; x--) {
- if (desc->bar[x - 1].iomem)
- munmap(desc->bar[x - 1].iomem, 1 << (desc->barsz - 3));
- }
if (cpp->driver_lock_needed)
close(desc->lock);
close(desc->device);
diff --git a/drivers/net/nfp/nfpcore/nfp_cppcore.c b/drivers/net/nfp/nfpcore/nfp_cppcore.c
index f61143f7..75d3c974 100644
--- a/drivers/net/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/nfp/nfpcore/nfp_cppcore.c
@@ -12,6 +12,7 @@
#include <sys/types.h>
#include <rte_byteorder.h>
+#include <rte_ethdev_pci.h>
#include "nfp_cpp.h"
#include "nfp_target.h"
@@ -542,7 +543,7 @@ nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t *value)
}
static struct nfp_cpp *
-nfp_cpp_alloc(const char *devname, int driver_lock_needed)
+nfp_cpp_alloc(struct rte_pci_device *dev, int driver_lock_needed)
{
const struct nfp_cpp_operations *ops;
struct nfp_cpp *cpp;
@@ -561,7 +562,7 @@ nfp_cpp_alloc(const char *devname, int driver_lock_needed)
cpp->driver_lock_needed = driver_lock_needed;
if (cpp->op->init) {
- err = cpp->op->init(cpp, devname);
+ err = cpp->op->init(cpp, dev);
if (err < 0) {
free(cpp);
return NULL;
@@ -604,9 +605,9 @@ nfp_cpp_free(struct nfp_cpp *cpp)
}
struct nfp_cpp *
-nfp_cpp_from_device_name(const char *devname, int driver_lock_needed)
+nfp_cpp_from_device_name(struct rte_pci_device *dev, int driver_lock_needed)
{
- return nfp_cpp_alloc(devname, driver_lock_needed);
+ return nfp_cpp_alloc(dev, driver_lock_needed);
}
/*
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 1d2e6b9e..244f8654 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -305,6 +305,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->min_rx_bufsize = 0;
dev_info->reta_size = internals->reta_size;
dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -623,6 +624,7 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
}
/* TODO: request info from primary to set up Rx and Tx */
eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -697,9 +699,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_null,
"size=<int> "
"copy=<int>");
-RTE_INIT(eth_null_init_log);
-static void
-eth_null_init_log(void)
+RTE_INIT(eth_null_init_log)
{
eth_null_logtype = rte_log_register("pmd.net.null");
if (eth_null_logtype >= 0)
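
This hunk, like the matching ones in the octeontx and pcap PMDs further down, adopts the reworked RTE_INIT() macro, which now emits the constructor declaration itself, so the forward declaration plus separate static function is no longer needed. New-style usage, with hypothetical driver names:

    #include <rte_common.h>
    #include <rte_log.h>

    static int mydrv_logtype;

    RTE_INIT(mydrv_init_log)
    {
            mydrv_logtype = rte_log_register("pmd.net.mydrv");
            if (mydrv_logtype >= 0)
                    rte_log_set_level(mydrv_logtype, RTE_LOG_NOTICE);
    }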
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 1eb453b2..0f3d5d67 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -46,9 +46,7 @@ int otx_net_logtype_mbox;
int otx_net_logtype_init;
int otx_net_logtype_driver;
-RTE_INIT(otx_net_init_log);
-static void
-otx_net_init_log(void)
+RTE_INIT(otx_net_init_log)
{
otx_net_logtype_mbox = rte_log_register("pmd.net.octeontx.mbox");
if (otx_net_logtype_mbox >= 0)
@@ -283,7 +281,10 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
+ /* The KEEP_CRC offload flag is not supported by the PMD;
+ * the block below can be removed once DEV_RX_OFFLOAD_CRC_STRIP is removed
+ */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
@@ -352,6 +353,9 @@ octeontx_dev_close(struct rte_eth_dev *dev)
rte_free(txq);
}
+
+ dev->tx_pkt_burst = NULL;
+ dev->rx_pkt_burst = NULL;
}
static int
@@ -445,9 +449,6 @@ octeontx_dev_stop(struct rte_eth_dev *dev)
ret);
return;
}
-
- dev->tx_pkt_burst = NULL;
- dev->rx_pkt_burst = NULL;
}
static void
@@ -787,7 +788,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
pki_qos_cfg_t pki_qos;
uintptr_t pool;
int ret, port;
- uint8_t gaura;
+ uint16_t gaura;
unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
@@ -898,8 +899,8 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
pool = (uintptr_t)mb_pool->pool_id;
- /* Get the gpool Id */
- gaura = octeontx_fpa_bufpool_gpool(pool);
+ /* Get the gaura Id */
+ gaura = octeontx_fpa_bufpool_gaura(pool);
pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
pki_qos.num_entry = 1;
@@ -1014,6 +1015,8 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
if (eth_dev == NULL)
return -ENODEV;
+ eth_dev->dev_ops = &octeontx_dev_ops;
+ eth_dev->device = &dev->device;
eth_dev->tx_pkt_burst = octeontx_xmit_pkts;
eth_dev->rx_pkt_burst = octeontx_recv_pkts;
rte_eth_dev_probing_finish(eth_dev);
@@ -1182,6 +1185,7 @@ octeontx_probe(struct rte_vdev_device *dev)
}
/* TODO: request info from primary to set up Rx and Tx */
eth_dev->dev_ops = &octeontx_dev_ops;
+ eth_dev->device = &dev->device;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index 2502d90e..a9149b4e 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -31,7 +31,7 @@ __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
return -ENOSPC;
/* Get the gaura Id */
- gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)tx_pkt->pool->pool_id);
+ gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)tx_pkt->pool->pool_id);
/* Setup PKO_SEND_HDR_S */
cmd_buf[0] = tx_pkt->data_len & 0xffff;
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 6bd4a7d7..e8810a17 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -26,6 +26,7 @@
#define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
+#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG "iface"
@@ -83,6 +84,7 @@ static const char *valid_arguments[] = {
ETH_PCAP_RX_PCAP_ARG,
ETH_PCAP_TX_PCAP_ARG,
ETH_PCAP_RX_IFACE_ARG,
+ ETH_PCAP_RX_IFACE_IN_ARG,
ETH_PCAP_TX_IFACE_ARG,
ETH_PCAP_IFACE_ARG,
NULL
@@ -430,6 +432,7 @@ eth_dev_start(struct rte_eth_dev *dev)
return -1;
rx->pcap = tx->pcap;
}
+
goto status_up;
}
@@ -465,6 +468,12 @@ eth_dev_start(struct rte_eth_dev *dev)
}
status_up:
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
dev->data->dev_link.link_status = ETH_LINK_UP;
return 0;
@@ -517,6 +526,12 @@ eth_dev_stop(struct rte_eth_dev *dev)
}
status_down:
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
@@ -538,6 +553,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_rx_queues = dev->data->nb_rx_queues;
dev_info->max_tx_queues = dev->data->nb_tx_queues;
dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -643,6 +659,38 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
+static int
+eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
.dev_stop = eth_dev_stop,
@@ -651,6 +699,10 @@ static const struct eth_dev_ops ops = {
.dev_infos_get = eth_dev_info,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_start = eth_rx_queue_start,
+ .tx_queue_start = eth_tx_queue_start,
+ .rx_queue_stop = eth_rx_queue_stop,
+ .tx_queue_stop = eth_tx_queue_stop,
.rx_queue_release = eth_queue_release,
.tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
@@ -658,6 +710,22 @@ static const struct eth_dev_ops ops = {
.stats_reset = eth_stats_reset,
};
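
With the rx/tx queue start/stop ops wired up here and the queue-state arrays kept current in eth_dev_start/eth_dev_stop above, individual pcap queues can be driven through the generic ethdev calls. A hedged usage sketch (port and queue ids are illustrative):

    #include <rte_ethdev.h>

    /* Stop, then restart, one RX queue of a running port. */
    static int
    restart_rx_queue(uint16_t port_id, uint16_t qid)
    {
            int ret = rte_eth_dev_rx_queue_stop(port_id, qid);

            if (ret != 0)
                    return ret;
            return rte_eth_dev_rx_queue_start(port_id, qid);
    }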
+static int
+add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
+ pcap_t *pcap, pcap_dumper_t *dumper)
+{
+ if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
+ return -1;
+ if (pcap)
+ pmd->queue[pmd->num_of_queue].pcap = pcap;
+ if (dumper)
+ pmd->queue[pmd->num_of_queue].dumper = dumper;
+ pmd->queue[pmd->num_of_queue].name = name;
+ pmd->queue[pmd->num_of_queue].type = type;
+ pmd->num_of_queue++;
+ return 0;
+}
+
/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
@@ -665,18 +733,16 @@ static const struct eth_dev_ops ops = {
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *pcap_filename = value;
struct pmd_devargs *rx = extra_args;
pcap_t *pcap = NULL;
- for (i = 0; i < rx->num_of_queue; i++) {
- if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
- return -1;
+ if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
+ return -1;
- rx->queue[i].pcap = pcap;
- rx->queue[i].name = pcap_filename;
- rx->queue[i].type = key;
+ if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
+ pcap_close(pcap);
+ return -1;
}
return 0;
@@ -689,18 +755,16 @@ open_rx_pcap(const char *key, const char *value, void *extra_args)
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *pcap_filename = value;
struct pmd_devargs *dumpers = extra_args;
pcap_dumper_t *dumper;
- for (i = 0; i < dumpers->num_of_queue; i++) {
- if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
- return -1;
+ if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
+ return -1;
- dumpers->queue[i].dumper = dumper;
- dumpers->queue[i].name = pcap_filename;
- dumpers->queue[i].type = key;
+ if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
+ pcap_dump_close(dumper);
+ return -1;
}
return 0;
@@ -726,48 +790,76 @@ open_rx_tx_iface(const char *key, const char *value, void *extra_args)
return 0;
}
+static inline int
+set_iface_direction(const char *iface, pcap_t *pcap,
+ pcap_direction_t direction)
+{
+ const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";
+ if (pcap_setdirection(pcap, direction) < 0) {
+ PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s\n",
+ iface, direction_str, pcap_geterr(pcap));
+ return -1;
+ }
+ PMD_LOG(INFO, "Setting %s pcap direction %s\n",
+ iface, direction_str);
+ return 0;
+}
+
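
set_iface_direction() is what backs the new rx_iface_in key: restricting a capture handle to PCAP_D_IN stops the port from re-capturing frames it transmitted on the same interface. The underlying libpcap calls in isolation (interface name and snaplen are illustrative):

    #include <stdio.h>
    #include <pcap.h>

    static pcap_t *
    open_rx_only(const char *iface)
    {
            char errbuf[PCAP_ERRBUF_SIZE];
            pcap_t *p = pcap_open_live(iface, 65535, 1 /* promisc */,
                                       1 /* to_ms */, errbuf);

            if (p == NULL) {
                    fprintf(stderr, "pcap_open_live: %s\n", errbuf);
                    return NULL;
            }
            /* PCAP_D_IN: deliver only frames received by the NIC */
            if (pcap_setdirection(p, PCAP_D_IN) < 0) {
                    fprintf(stderr, "pcap_setdirection: %s\n",
                            pcap_geterr(p));
                    pcap_close(p);
                    return NULL;
            }
            return p;
    }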
+static inline int
+open_iface(const char *key, const char *value, void *extra_args)
+{
+ const char *iface = value;
+ struct pmd_devargs *pmd = extra_args;
+ pcap_t *pcap = NULL;
+
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+ if (add_queue(pmd, iface, key, pcap, NULL) < 0) {
+ pcap_close(pcap);
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* Opens a NIC for reading packets from it
*/
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
- const char *iface = value;
- struct pmd_devargs *rx = extra_args;
- pcap_t *pcap = NULL;
+ int ret = open_iface(key, value, extra_args);
+ if (ret < 0)
+ return ret;
+ if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
+ struct pmd_devargs *pmd = extra_args;
+ unsigned int qid = pmd->num_of_queue - 1;
- for (i = 0; i < rx->num_of_queue; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- rx->queue[i].pcap = pcap;
- rx->queue[i].name = iface;
- rx->queue[i].type = key;
+ set_iface_direction(pmd->queue[qid].name,
+ pmd->queue[qid].pcap,
+ PCAP_D_IN);
}
return 0;
}
+static inline int
+rx_iface_args_process(const char *key, const char *value, void *extra_args)
+{
+ if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
+ strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
+ return open_rx_iface(key, value, extra_args);
+
+ return 0;
+}
+
/*
* Opens a NIC for writing packets to it
*/
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
- const char *iface = value;
- struct pmd_devargs *tx = extra_args;
- pcap_t *pcap;
-
- for (i = 0; i < tx->num_of_queue; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- tx->queue[i].pcap = pcap;
- tx->queue[i].name = iface;
- tx->queue[i].type = key;
- }
-
- return 0;
+ return open_iface(key, value, extra_args);
}
static struct rte_vdev_driver pmd_pcap_drv;
@@ -925,6 +1017,7 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
}
/* TODO: request info from primary to set up Rx and Tx */
eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -958,22 +1051,16 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
* We check whether we want to open a RX stream from a real NIC or a
* pcap file
*/
- pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
- if (pcaps.num_of_queue)
- is_rx_pcap = 1;
- else
- pcaps.num_of_queue = rte_kvargs_count(kvlist,
- ETH_PCAP_RX_IFACE_ARG);
-
- if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
- pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+ is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
+ pcaps.num_of_queue = 0;
- if (is_rx_pcap)
+ if (is_rx_pcap) {
ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
&open_rx_pcap, &pcaps);
- else
- ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
- &open_rx_iface, &pcaps);
+ } else {
+ ret = rte_kvargs_process(kvlist, NULL,
+ &rx_iface_args_process, &pcaps);
+ }
if (ret < 0)
goto free_kvlist;
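
Passing a NULL key to rte_kvargs_process() makes it visit every key/value pair in command-line order, so mixed rx_iface and rx_iface_in arguments still map to queues in the order the user wrote them; rx_iface_args_process() then filters for the two RX keys itself. The dispatch pattern in isolation (handler name illustrative):

    #include <string.h>
    #include <rte_kvargs.h>

    /* Invoked for every pair because the key filter passed below is NULL. */
    static int
    rx_key_cb(const char *key, const char *value, void *extra_args)
    {
            if (strcmp(key, "rx_iface") && strcmp(key, "rx_iface_in"))
                    return 0;       /* ignore unrelated keys */
            /* open `value` and record the queue in extra_args here */
            (void)value;
            (void)extra_args;
            return 0;
    }

    /* ... rte_kvargs_process(kvlist, NULL, rx_key_cb, &pcaps); */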
@@ -982,15 +1069,8 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
* We check whether we want to open a TX stream to a real NIC or a
* pcap file
*/
- dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
- if (dumpers.num_of_queue)
- is_tx_pcap = 1;
- else
- dumpers.num_of_queue = rte_kvargs_count(kvlist,
- ETH_PCAP_TX_IFACE_ARG);
-
- if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
- dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+ is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
+ dumpers.num_of_queue = 0;
if (is_tx_pcap)
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
@@ -1046,12 +1126,11 @@ RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
ETH_PCAP_RX_PCAP_ARG "=<string> "
ETH_PCAP_TX_PCAP_ARG "=<string> "
ETH_PCAP_RX_IFACE_ARG "=<ifc> "
+ ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
ETH_PCAP_TX_IFACE_ARG "=<ifc> "
ETH_PCAP_IFACE_ARG "=<ifc>");
-RTE_INIT(eth_pcap_init_log);
-static void
-eth_pcap_init_log(void)
+RTE_INIT(eth_pcap_init_log)
{
eth_pcap_logtype = rte_log_register("pmd.net.pcap");
if (eth_pcap_logtype >= 0)
diff --git a/drivers/net/qede/LICENSE.qede_pmd b/drivers/net/qede/LICENSE.qede_pmd
deleted file mode 100644
index 022085a1..00000000
--- a/drivers/net/qede/LICENSE.qede_pmd
+++ /dev/null
@@ -1,3 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016 - 2018 Cavium Inc.
- */
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index c30a867e..488ca1d9 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -1,8 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2016 - 2018 Cavium Inc.
# All rights reserved.
# www.cavium.com
-#
-# See LICENSE.qede_pmd for copyright and licensing details.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index ca1c2b11..d5d6f8e2 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include <rte_memzone.h>
@@ -201,6 +199,11 @@ void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
DP_VERBOSE(p_dev, ECORE_MSG_SP,
"Free memzone %s\n", ecore_mz_mapping[j]->name);
rte_memzone_free(ecore_mz_mapping[j]);
+ while (j < ecore_mz_count - 1) {
+ ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
+ j++;
+ }
+ ecore_mz_count--;
return;
}
}
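
The added loop closes the hole left in ecore_mz_mapping[] after a memzone is freed, shifting the tail left so the array stays dense and ecore_mz_count keeps matching the number of live entries; without it, repeated alloc/free cycles would strand slots. The remove-and-compact pattern in isolation (a sketch; the caller must guarantee j < n):

    /* Remove element j from a dense n-element array, return new count. */
    static unsigned int
    array_remove_compact(void *arr[], unsigned int n, unsigned int j)
    {
            while (j < n - 1) {
                    arr[j] = arr[j + 1];    /* shift tail left by one */
                    j++;
            }
            return n - 1;
    }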
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 27090c79..630867fa 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __BCM_OSAL_H
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index 52507844..ca8e59db 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __COMMON_HSI__
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 57d6aa95..5d79fdf0 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_H
diff --git a/drivers/net/qede/base/ecore_attn_values.h b/drivers/net/qede/base/ecore_attn_values.h
index d893e0a6..ec773fbd 100644
--- a/drivers/net/qede/base/ecore_attn_values.h
+++ b/drivers/net/qede/base/ecore_attn_values.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ATTN_VALUES_H__
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
index 0b797460..6d0382d3 100644
--- a/drivers/net/qede/base/ecore_chain.h
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_CHAIN_H__
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index a91b2ff3..bf36ce58 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index 3bcbe8f1..f8c955ca 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef _ECORE_CID_
diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h
index 65509add..6c8b2831 100644
--- a/drivers/net/qede/base/ecore_cxt_api.h
+++ b/drivers/net/qede/base/ecore_cxt_api.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_CXT_API_H__
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 9b8d39f6..96678745 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index 49df62ce..519e6cea 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_DCBX_H__
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index 2ad1def4..eaf8e082 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_DCBX_API_H__
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 4ebbedd6..31f1f3ee 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
@@ -2513,9 +2511,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
}
- /* Log and clean previous pglue_b errors if such exist */
+ /* Log and clear previous pglue_b errors if such exist */
ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
- ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
/* Enable the PF's internal FID_enable in the PXP */
rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
@@ -2523,6 +2520,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
goto load_err;
+ /* Clear the pglue_b was_error indication.
+ * In E4 it must be done after the BME and the internal
+ * FID_enable for the PF are set, since VDMs may cause the
+ * indication to be set again.
+ */
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 0dd78d60..02bacc22 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_DEV_API_H__
diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h
index ac29dc49..8c8fed4e 100644
--- a/drivers/net/qede/base/ecore_gtt_reg_addr.h
+++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef GTT_REG_ADDR_H
diff --git a/drivers/net/qede/base/ecore_gtt_values.h b/drivers/net/qede/base/ecore_gtt_values.h
index d9af94df..adc20c0c 100644
--- a/drivers/net/qede/base/ecore_gtt_values.h
+++ b/drivers/net/qede/base/ecore_gtt_values.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __PREVENT_PXP_GLOBAL_WIN__
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index d400fa91..2d761b97 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_HSI_COMMON__
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index 262834ed..bf548722 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_HSI_DEBUG_TOOLS__
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index efe3bb24..6b512305 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_HSI_ETH__
diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h
index c318514b..d77edaa1 100644
--- a/drivers/net/qede/base/ecore_hsi_init_func.h
+++ b/drivers/net/qede/base/ecore_hsi_init_func.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_HSI_INIT_FUNC__
diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h
index 2e338a98..0e157f9b 100644
--- a/drivers/net/qede/base/ecore_hsi_init_tool.h
+++ b/drivers/net/qede/base/ecore_hsi_init_tool.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_HSI_INIT_TOOL__
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index b00c33d4..51bba27e 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index f3f513e8..394207eb 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_HW_H__
diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h
index 2f4bd536..b8c2686f 100644
--- a/drivers/net/qede/base/ecore_hw_defs.h
+++ b/drivers/net/qede/base/ecore_hw_defs.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef _ECORE_IGU_DEF_H_
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index 3f986629..b8496cb2 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 310c9ed7..1024bb26 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef _INIT_FW_FUNCS_H
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index eadccf40..b7636f36 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
/* include the precompiled configuration values - only once */
diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h
index 7ca00e4e..de7846d4 100644
--- a/drivers/net/qede/base/ecore_init_ops.h
+++ b/drivers/net/qede/base/ecore_init_ops.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_INIT_OPS__
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 7272f059..4c271d35 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include <rte_string_fns.h>
@@ -233,15 +231,19 @@ static const char *grc_timeout_attn_master_to_str(u8 master)
static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
u32 tmp, tmp2;
/* We've already cleared the timeout interrupt register, so we learn
- * of interrupts via the validity register
+ * of interrupts via the validity register.
+ * Any attention which is not for a timeout event is treated as fatal.
*/
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
- if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
+ if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) {
+ rc = ECORE_INVAL;
goto out;
+ }
/* Read the GRC timeout information */
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
@@ -265,11 +267,11 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
(tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
ECORE_GRC_ATTENTION_VF_SHIFT);
-out:
- /* Regardles of anything else, clean the validity bit */
+ /* Clean the validity bit */
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
- return ECORE_SUCCESS;
+out:
+ return rc;
}
#define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index bb22fdd8..041240d7 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_INT_H__
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index dff53776..aeaf469e 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_INT_API_H__
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index ee7cad74..29001d71 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_SRIOV_API_H__
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index e4dc1c92..05693029 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __IRO_H__
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index b47abeb9..685fa2e8 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __IRO_VALUES_H__
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 91d89e56..d71f4616 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index bea6a6df..8fa40302 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_L2_H__
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index 43ebbd12..575b9e3a 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_L2_API_H__
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 784d28c5..ea14c172 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index c422736f..8e125310 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_MCP_H__
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 06b33bb8..cfb9f99d 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_MCP_API_H__
diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c
index b48076a3..f7666472 100644
--- a/drivers/net/qede/base/ecore_mng_tlv.c
+++ b/drivers/net/qede/base/ecore_mng_tlv.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index f049d821..f91b25e2 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_PROTO_IF_H__
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 7dec2dd5..721b8c15 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __RT_DEFS_H__
diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h
index 469bf1d6..4633dbeb 100644
--- a/drivers/net/qede/base/ecore_sp_api.h
+++ b/drivers/net/qede/base/ecore_sp_api.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_SP_API_H__
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index 47c1febf..b43baf9d 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index d160a76e..e57414cf 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_SP_COMMANDS_H__
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index db169e6e..776c86f7 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index 7d9be3e9..6142c399 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_SPQ_H__
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 451aabb1..f7ebf7ad 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
@@ -61,6 +59,8 @@ const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_COALESCE_UPDATE",
"CHANNEL_TLV_QID",
"CHANNEL_TLV_COALESCE_READ",
+ "CHANNEL_TLV_BULLETIN_UPDATE_MAC",
+ "CHANNEL_TLV_UPDATE_MTU",
"CHANNEL_TLV_MAX"
};
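
Both new TLV names are appended here, and the matching CHANNEL_TLV_* enum values are added in ecore_vfpf_if.h below; the table and the enum must stay in lock-step, with one string per value up to and including "CHANNEL_TLV_MAX". A compile-time guard of that intent could look like this (illustrative, not part of the patch):

    #include <rte_common.h>

    _Static_assert(RTE_DIM(ecore_channel_tlvs_string) == CHANNEL_TLV_MAX + 1,
                   "channel TLV string table out of sync with enum");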
@@ -2858,6 +2858,45 @@ out:
length, status);
}
+static enum _ecore_status_t
+ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct ecore_sp_vport_update_params params;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct vfpf_update_mtu_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Validate PF can send such a request */
+ if (!p_vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing MTU update\n",
+ p_vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto send_status;
+ }
+
+ p_req = &mbx->req_virt->update_mtu;
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.mtu = p_req->mtu;
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+send_status:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ CHANNEL_TLV_UPDATE_MTU,
+ sizeof(struct pfvf_def_resp_tlv),
+ status);
+ return rc;
+}
+
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type)
{
@@ -4140,6 +4179,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
case CHANNEL_TLV_COALESCE_READ:
ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_UPDATE_MTU:
+ ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
/* If we've received a message from a VF we consider malicious
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index 8d846d3e..50c7d2c9 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_SRIOV_H__
diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h
index 3af2b57d..b893f1d4 100644
--- a/drivers/net/qede/base/ecore_status.h
+++ b/drivers/net/qede/base/ecore_status.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_STATUS_H__
diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h
index f6459c35..249136b0 100644
--- a/drivers/net/qede/base/ecore_utils.h
+++ b/drivers/net/qede/base/ecore_utils.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_UTILS_H__
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index b7b3b872..d2213f79 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
@@ -1628,6 +1626,39 @@ exit:
return rc;
}
+enum _ecore_status_t
+ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_update_mtu_tlv *p_req;
+ struct pfvf_def_resp_tlv *p_resp;
+ enum _ecore_status_t rc;
+
+ if (!mtu)
+ return ECORE_INVAL;
+
+ /* clear mailbox and prep header tlv */
+ p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_MTU,
+ sizeof(*p_req));
+ p_req->mtu = mtu;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requesting MTU update to %d\n", mtu);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ if (p_resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED)
+ rc = ECORE_INVAL;
+
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
u16 sb_id)
{
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index e26b30bf..a07f82eb 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_VF_H__
@@ -319,5 +317,14 @@ void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
enum BAR_ID bar_id);
+
+/**
+ * @brief ecore_vf_pf_update_mtu - Update MTU for VF.
+ *
+ * @param p_hwfn
+ * @param mtu
+ */
+enum _ecore_status_t
+ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu);
#endif
#endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h
index af7bc36b..1a9fb3b1 100644
--- a/drivers/net/qede/base/ecore_vf_api.h
+++ b/drivers/net/qede/base/ecore_vf_api.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_VF_API_H__
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index dce937e0..c30677ab 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_VF_PF_IF_H__
@@ -531,6 +529,18 @@ struct pfvf_read_coal_resp_tlv {
u8 padding[6];
};
+struct vfpf_bulletin_update_mac_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 mac[ETH_ALEN];
+ u8 padding[2];
+};
+
+struct vfpf_update_mtu_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 mtu;
+ u8 padding[6];
+};
+
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -545,6 +555,8 @@ union vfpf_tlvs {
struct vfpf_update_tunn_param_tlv tunn_param_update;
struct vfpf_update_coalesce update_coalesce;
struct vfpf_read_coal_req_tlv read_coal_req;
+ struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
+ struct vfpf_update_mtu_tlv update_mtu;
struct tlv_buffer_size tlv_buf_size;
};
@@ -675,6 +687,8 @@ enum {
CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_QID,
CHANNEL_TLV_COALESCE_READ,
+ CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+ CHANNEL_TLV_UPDATE_MTU,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 9de49b64..abfa6854 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ETH_COMMON__
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 752473e1..81aa88e7 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
/****************************************************************************
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
index a20d0674..ab86260e 100644
--- a/drivers/net/qede/base/nvm_cfg.h
+++ b/drivers/net/qede/base/nvm_cfg.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
/****************************************************************************
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 71f0ca13..402f6204 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 3206cc6c..df52ea92 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "qede_ethdev.h"
@@ -339,6 +337,24 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
}
static void
+qede_interrupt_handler_intx(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ u64 status;
+
+ /* Check if our device actually raised an interrupt */
+ status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
+ if (status & 0x1) {
+ qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+
+ if (rte_intr_enable(eth_dev->intr_handle))
+ DP_ERR(edev, "rte_intr_enable failed\n");
+ }
+}
+
+static void
qede_interrupt_handler(void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
@@ -518,14 +534,9 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
params.update_vport_active_tx_flg = 1;
params.vport_active_rx_flg = flg;
params.vport_active_tx_flg = flg;
- if (!qdev->enable_tx_switching) {
- if ((QEDE_NPAR_TX_SWITCHING != NULL) ||
- ((QEDE_VF_TX_SWITCHING != NULL) && IS_VF(edev))) {
- params.update_tx_switching_flg = 1;
- params.tx_switching_flg = !flg;
- DP_INFO(edev, "%s tx-switching is disabled\n",
- QEDE_NPAR_TX_SWITCHING ? "NPAR" : "VF");
- }
+ if (~qdev->enable_tx_switching & flg) {
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = !flg;
}
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
@@ -603,37 +614,6 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
return 0;
}
-/* Update MTU via vport-update without doing port restart.
- * The vport must be deactivated before calling this API.
- */
-int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_sp_vport_update_params params;
- struct ecore_hwfn *p_hwfn;
- int rc;
- int i;
-
- memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
- params.vport_id = 0;
- params.mtu = mtu;
- params.vport_id = 0;
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- params.opaque_fid = p_hwfn->hw_info.opaque_fid;
- rc = ecore_sp_vport_update(p_hwfn, &params,
- ECORE_SPQ_MODE_EBLOCK, NULL);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to update MTU\n");
- return -1;
- }
- }
- DP_INFO(edev, "MTU updated to %u\n", mtu);
-
- return 0;
-}
-
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
memset(ucast, 0, sizeof(struct ecore_filter_ucast));
@@ -979,7 +959,10 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
if (rc == 0)
rc = ecore_filter_ucast_cmd(edev, ucast,
ECORE_SPQ_MODE_CB, NULL);
- if (rc != ECORE_SUCCESS)
+ /* Indicate error only for add filter operation.
+ * Delete filter operations are not severe.
+ */
+ if ((rc != ECORE_SUCCESS) && add)
DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
rc, add);
@@ -993,7 +976,11 @@ qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
struct ecore_filter_ucast ucast;
int re;
+ if (!is_valid_assigned_ether_addr(mac_addr))
+ return -EINVAL;
+
qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_ADD;
ucast.type = ECORE_FILTER_MAC;
ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
@@ -1015,6 +1002,9 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
return;
}
+ if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
+ return;
+
qede_set_ucast_cmn_params(&ucast);
ucast.opcode = ECORE_FILTER_REMOVE;
ucast.type = ECORE_FILTER_MAC;
@@ -1038,8 +1028,9 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
return -EPERM;
}
- qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
- return 0;
+ qede_mac_addr_remove(eth_dev, 0);
+
+ return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
@@ -1296,6 +1287,12 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE(edev);
+ /* Update MTU only if it has changed */
+ if (eth_dev->data->mtu != qdev->mtu) {
+ if (qede_update_mtu(eth_dev, qdev->mtu))
+ goto err;
+ }
+
/* Configure TPA parameters */
if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
if (qede_enable_tpa(eth_dev, true))
@@ -1359,9 +1356,6 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
/* Disable traffic */
ecore_hw_stop_fastpath(edev); /* TBD - loop */
- if (IS_PF(edev))
- qede_mac_addr_remove(eth_dev, 0);
-
DP_INFO(edev, "Device is stopped\n");
}
@@ -1387,8 +1381,12 @@ static int qede_args_check(const char *key, const char *val, void *opaque)
}
if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
- (strcmp(QEDE_VF_TX_SWITCHING, key) == 0))
+ ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
qdev->enable_tx_switching = !!tmp;
+ DP_INFO(edev, "Disabling %s tx-switching\n",
+ strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
+ "VF" : "NPAR");
+ }
return ret;
}
@@ -1463,7 +1461,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
/* Parse devargs and fix up rxmode */
if (qede_args(eth_dev))
- return -ENOTSUP;
+ DP_NOTICE(edev, false,
+ "Invalid devargs supplied, requested change will not take effect\n");
if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
rxmode->mq_mode == ETH_MQ_RX_RSS)) {
@@ -1496,8 +1495,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
/* Enable VLAN offloads by default */
ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ ETH_VLAN_FILTER_MASK);
if (ret)
return ret;
@@ -1557,6 +1555,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
@@ -1571,7 +1570,6 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
@@ -1585,11 +1583,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
dev_info->default_rxconf = (struct rte_eth_rxconf) {
/* Packets are always dropped if no descriptors are available */
.rx_drop_en = 1,
- /* The below RX offloads are always enabled */
- .offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM),
+ .offloads = 0,
};
memset(&link, 0, sizeof(struct qed_link_output));
@@ -1615,18 +1609,20 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
+ struct qed_link_output q_link;
+ struct rte_eth_link link;
uint16_t link_duplex;
- struct qed_link_output link;
- struct rte_eth_link *curr = &eth_dev->data->dev_link;
- memset(&link, 0, sizeof(struct qed_link_output));
- qdev->ops->common->get_link(edev, &link);
+ memset(&q_link, 0, sizeof(q_link));
+ memset(&link, 0, sizeof(link));
+
+ qdev->ops->common->get_link(edev, &q_link);
/* Link Speed */
- curr->link_speed = link.speed;
+ link.link_speed = q_link.speed;
/* Link Mode */
- switch (link.duplex) {
+ switch (q_link.duplex) {
case QEDE_DUPLEX_HALF:
link_duplex = ETH_LINK_HALF_DUPLEX;
break;
@@ -1637,21 +1633,20 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
default:
link_duplex = -1;
}
- curr->link_duplex = link_duplex;
+ link.link_duplex = link_duplex;
/* Link Status */
- curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
+ link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
/* AN */
- curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
+ link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
ETH_LINK_AUTONEG : ETH_LINK_FIXED;
DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
- curr->link_speed, curr->link_duplex,
- curr->link_autoneg, curr->link_status);
+ link.link_speed, link.link_duplex,
+ link.link_autoneg, link.link_status);
- /* return 0 means link status changed, -1 means not changed */
- return ((curr->link_status == link.link_up) ? -1 : 0);
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
@@ -1737,8 +1732,20 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
qdev->ops->common->slowpath_stop(edev);
qdev->ops->common->remove(edev);
rte_intr_disable(&pci_dev->intr_handle);
- rte_intr_callback_unregister(&pci_dev->intr_handle,
- qede_interrupt_handler, (void *)eth_dev);
+
+ switch (pci_dev->intr_handle.type) {
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ qede_interrupt_handler_intx,
+ (void *)eth_dev);
+ break;
+ default:
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ qede_interrupt_handler,
+ (void *)eth_dev);
+ }
+
if (ECORE_IS_CMT(edev))
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}
@@ -2056,6 +2063,70 @@ qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
}
+/* Update MTU via vport-update without doing a port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ if (IS_PF(edev)) {
+ struct ecore_sp_vport_update_params params;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+ } else {
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
+ if (rc == ECORE_INVAL) {
+ DP_INFO(edev, "VF MTU Update TLV not supported\n");
+ /* Recreate vport */
+ rc = qede_start_vport(qdev, mtu);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Restore config lost due to vport stop */
+ if (eth_dev->data->promiscuous)
+ qede_promiscuous_enable(eth_dev);
+ else
+ qede_promiscuous_disable(eth_dev);
+
+ if (eth_dev->data->all_multicast)
+ qede_allmulticast_enable(eth_dev);
+ else
+ qede_allmulticast_disable(eth_dev);
+
+ qede_vlan_offload_set(eth_dev,
+ qdev->vlan_offload_mask);
+ } else if (rc != ECORE_SUCCESS) {
+ goto err;
+ }
+ }
+ }
+ DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
+
+ return 0;
+
+err:
+ DP_ERR(edev, "Failed to update MTU\n");
+ return -1;
+}
+
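A minimal caller sketch for the new helper (illustrative only; new_mtu is a placeholder). It mirrors qede_set_mtu() further down: the port is stopped so the vport is inactive, qdev->mtu is updated, and the subsequent qede_dev_start() applies it through qede_update_mtu():

    qede_dev_stop(eth_dev);   /* deactivates the vport */
    qdev->mtu = new_mtu;      /* placeholder value */
    qede_dev_start(eth_dev);  /* calls qede_update_mtu() before TPA setup */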
static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct rte_eth_fc_conf *fc_conf)
{
@@ -2210,7 +2281,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
vport_update_params.vport_id = 0;
/* pass the L2 handles instead of qids */
for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
- idx = qdev->rss_ind_table[i];
+ idx = i % QEDE_RSS_COUNT(qdev);
rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
}
vport_update_params.rss_params = &rss_params;
@@ -2458,12 +2529,8 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
dev->data->dev_started = 0;
qede_dev_stop(dev);
restart = true;
- } else {
- if (IS_PF(edev))
- qede_mac_addr_remove(dev, 0);
}
rte_delay_ms(1000);
- qede_start_vport(qdev, mtu); /* Recreate vport */
qdev->mtu = mtu;
/* Fix up RX buf size for all queues of the port */
@@ -2483,25 +2550,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
}
}
if (max_rx_pkt_len > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
- else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
-
- /* Restore config lost due to vport stop */
- if (IS_PF(edev))
- qede_mac_addr_set(dev, &qdev->primary_mac);
-
- if (dev->data->promiscuous)
- qede_promiscuous_enable(dev);
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- qede_promiscuous_disable(dev);
-
- if (dev->data->all_multicast)
- qede_allmulticast_enable(dev);
- else
- qede_allmulticast_disable(dev);
-
- qede_vlan_offload_set(dev, qdev->vlan_offload_mask);
+ dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
if (!dev->data->dev_started && restart) {
qede_dev_start(dev);
@@ -2999,6 +3050,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.mtu_set = qede_set_mtu,
.udp_tunnel_port_add = qede_udp_dst_port_add,
.udp_tunnel_port_del = qede_udp_dst_port_del,
+ .mac_addr_add = qede_mac_addr_add,
+ .mac_addr_remove = qede_mac_addr_remove,
+ .mac_addr_set = qede_mac_addr_set,
};
static void qede_update_pf_params(struct ecore_dev *edev)
@@ -3027,6 +3081,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Fix up ecore debug level */
uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
uint8_t dp_level = ECORE_LEVEL_VERBOSE;
+ uint32_t int_mode;
int rc;
/* Extract key data structures */
@@ -3071,8 +3126,22 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
return -ENODEV;
}
qede_update_pf_params(edev);
- rte_intr_callback_register(&pci_dev->intr_handle,
- qede_interrupt_handler, (void *)eth_dev);
+
+ switch (pci_dev->intr_handle.type) {
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ int_mode = ECORE_INT_MODE_INTA;
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ qede_interrupt_handler_intx,
+ (void *)eth_dev);
+ break;
+ default:
+ int_mode = ECORE_INT_MODE_MSIX;
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ qede_interrupt_handler,
+ (void *)eth_dev);
+ }
+
if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
@@ -3080,7 +3149,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Start the Slowpath-process */
memset(&params, 0, sizeof(struct qed_slowpath_params));
- params.int_mode = ECORE_INT_MODE_MSIX;
+
+ params.int_mode = int_mode;
params.drv_major = QEDE_PMD_VERSION_MAJOR;
params.drv_minor = QEDE_PMD_VERSION_MINOR;
params.drv_rev = QEDE_PMD_VERSION_REVISION;
@@ -3163,7 +3233,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
ECORE_LEADING_HWFN(edev),
vf_mac,
&is_mac_forced);
- if (is_mac_exist && is_mac_forced) {
+ if (is_mac_exist) {
DP_INFO(edev, "VF macaddr received from PF\n");
ether_addr_copy((struct ether_addr *)&vf_mac,
&eth_dev->data->mac_addrs[0]);
@@ -3371,9 +3441,7 @@ RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
-RTE_INIT(qede_init_log);
-static void
-qede_init_log(void)
+RTE_INIT(qede_init_log)
{
qede_logtype_init = rte_log_register("pmd.net.qede.init");
if (qede_logtype_init >= 0)
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index a335d4da..6e9a5b4b 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -34,6 +32,7 @@
#include "base/nvm_cfg.h"
#include "base/ecore_sp_commands.h"
#include "base/ecore_l2.h"
+#include "base/ecore_vf.h"
#include "qede_logs.h"
#include "qede_if.h"
@@ -45,7 +44,7 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
-#define QEDE_PMD_VERSION_MINOR 8
+#define QEDE_PMD_VERSION_MINOR 9
#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
index 9d0b0526..83580d04 100644
--- a/drivers/net/qede/qede_fdir.c
+++ b/drivers/net/qede/qede_fdir.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2017 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include <rte_udp.h>
@@ -465,5 +463,8 @@ int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
udpv4_flow->src_port = ntuple->src_port;
udpv4_flow->dst_port = ntuple->dst_port;
}
+
+ fdir_entry.action.rx_queue = ntuple->queue;
+
return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
}
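The requested queue now reaches the common fdir path; for reference, a hedged sketch of how an application submits such a request through the generic filter API (field values and port_id are illustrative):

    struct rte_eth_ntuple_filter ntuple = {
            .flags = RTE_5TUPLE_FLAGS,
            .proto = IPPROTO_UDP,
            .proto_mask = UINT8_MAX,
            .dst_port = rte_cpu_to_be_16(4789),
            .dst_port_mask = UINT16_MAX,
            .queue = 3,               /* now honoured by the fix above */
    };

    rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
                            RTE_ETH_FILTER_ADD, &ntuple);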
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 01f17c9a..ee5e54c1 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef _QEDE_IF_H
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
index e7f714f2..3187d97b 100644
--- a/drivers/net/qede/qede_logs.h
+++ b/drivers/net/qede/qede_logs.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef _QEDE_LOGS_H_
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index c3407fe9..46fa8371 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include <limits.h>
@@ -281,7 +279,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
/* Start the slowpath */
memset(&hw_init_params, 0, sizeof(hw_init_params));
hw_init_params.b_hw_start = true;
- hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
+ hw_init_params.int_mode = params->int_mode;
hw_init_params.allow_npar_tx_switch = true;
hw_init_params.bin_fw_data = data;
@@ -634,8 +632,11 @@ void qed_link_update(struct ecore_hwfn *hwfn)
{
struct ecore_dev *edev = hwfn->p_dev;
struct qede_dev *qdev = (struct qede_dev *)edev;
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
- qede_link_update((struct rte_eth_dev *)qdev->ethdev, 0);
+ if (!qede_link_update(dev, 0))
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
}
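rte_eth_linkstatus_set() stores the new link atomically and returns zero only when the status actually changed, so the LSC event above fires strictly on transitions. The same pattern, sketched generically:

    struct rte_eth_link link;

    memset(&link, 0, sizeof(link));
    /* ... fill link from hardware state ... */
    if (rte_eth_linkstatus_set(dev, &link) == 0)
            _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);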
static int qed_drain(struct ecore_dev *edev)
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index bdb5d6f1..0f157ded 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include <rte_net.h>
@@ -192,9 +190,15 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
void qede_rx_queue_release(void *rx_queue)
{
struct qede_rx_queue *rxq = rx_queue;
+ struct qede_dev *qdev;
+ struct ecore_dev *edev;
if (rxq) {
+ /* Dereference rxq only after the NULL check */
+ qdev = rxq->qdev;
+ edev = QEDE_INIT_EDEV(qdev);
+ PMD_INIT_FUNC_TRACE(edev);
qede_rx_queue_release_mbufs(rxq);
+ qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
+ qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
rte_free(rxq->sw_rx_ring);
rte_free(rxq);
}
@@ -350,9 +354,14 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
void qede_tx_queue_release(void *tx_queue)
{
struct qede_tx_queue *txq = tx_queue;
+ struct qede_dev *qdev;
+ struct ecore_dev *edev;
if (txq) {
+ /* Dereference txq only after the NULL check */
+ qdev = txq->qdev;
+ edev = QEDE_INIT_EDEV(qdev);
+ PMD_INIT_FUNC_TRACE(edev);
qede_tx_queue_release_mbufs(txq);
+ qdev->ops->common->chain_free(edev, &txq->tx_pbl);
rte_free(txq->sw_tx_ring);
rte_free(txq);
}
@@ -441,8 +450,6 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct qede_fastpath *fp;
- struct qede_rx_queue *rxq;
- struct qede_tx_queue *txq;
uint16_t sb_idx;
uint8_t i;
@@ -467,21 +474,13 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
if (eth_dev->data->rx_queues[i]) {
qede_rx_queue_release(eth_dev->data->rx_queues[i]);
- rxq = eth_dev->data->rx_queues[i];
- qdev->ops->common->chain_free(edev,
- &rxq->rx_bd_ring);
- qdev->ops->common->chain_free(edev,
- &rxq->rx_comp_ring);
eth_dev->data->rx_queues[i] = NULL;
}
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
if (eth_dev->data->tx_queues[i]) {
- txq = eth_dev->data->tx_queues[i];
qede_tx_queue_release(eth_dev->data->tx_queues[i]);
- qdev->ops->common->chain_free(edev,
- &txq->tx_pbl);
eth_dev->data->tx_queues[i] = NULL;
}
}
@@ -1970,7 +1969,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Descriptor based VLAN insertion */
- if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ if (tx_ol_flags & PKT_TX_VLAN_PKT) {
vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 84a834d2..e710fbae 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -1,9 +1,7 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
@@ -147,7 +145,6 @@
PKT_TX_TCP_SEG)
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
- PKT_TX_QINQ_PKT | \
PKT_TX_VLAN_PKT | \
PKT_TX_TUNNEL_VXLAN | \
PKT_TX_TUNNEL_GENEVE | \
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 35b837c3..791deb0b 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -164,6 +164,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -684,9 +685,7 @@ RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");
-RTE_INIT(eth_ring_init_log);
-static void
-eth_ring_init_log(void)
+RTE_INIT(eth_ring_init_log)
{
eth_ring_logtype = rte_log_register("pmd.net.ring");
if (eth_ring_logtype >= 0)
diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
index 3aa14c7b..2d34e869 100644
--- a/drivers/net/sfc/meson.build
+++ b/drivers/net/sfc/meson.build
@@ -6,7 +6,7 @@
# This software was jointly developed between OKTET Labs (under contract
# for Solarflare) and Solarflare Communications, Inc.
-if arch_subdir != 'x86'
+if arch_subdir != 'x86' or cc.sizeof('void *') == 4
build = false
endif
diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
index 83faad16..ce96e83f 100644
--- a/drivers/net/sfc/sfc_dp_rx.h
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -195,6 +195,7 @@ struct sfc_dp_rx {
#define SFC_DP_RX_FEAT_TUNNELS 0x4
#define SFC_DP_RX_FEAT_FLOW_FLAG 0x8
#define SFC_DP_RX_FEAT_FLOW_MARK 0x10
+#define SFC_DP_RX_FEAT_CHECKSUM 0x20
sfc_dp_rx_get_dev_info_t *get_dev_info;
sfc_dp_rx_pool_ops_supported_t *pool_ops_supported;
sfc_dp_rx_qsize_up_rings_t *qsize_up_rings;
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index a075612c..eda9676c 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -39,8 +39,6 @@ struct sfc_dp_tx_qcreate_info {
unsigned int max_fill_level;
/** Minimum number of unused Tx descriptors to do reap */
unsigned int free_thresh;
- /** Transmit queue configuration flags */
- unsigned int flags;
/** Offloads enabled on the transmit queue */
uint64_t offloads;
/** Tx queue size */
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
index 5f5af602..81c8f7fb 100644
--- a/drivers/net/sfc/sfc_ef10_essb_rx.c
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -322,6 +322,12 @@ sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
const efx_qword_t *qwordp;
uint16_t pkt_len;
+ /* Buffers to be discarded have 0 in packet type */
+ if (unlikely(m->packet_type == 0)) {
+ rte_mempool_put(rxq->refill_mb_pool, m);
+ goto next_buf;
+ }
+
rx_pkts[n_rx_pkts++] = m;
/* Parse pseudo-header */
@@ -357,6 +363,7 @@ sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
EFX_QWORD_FIELD(*qwordp,
ES_EZ_ESSB_RX_PREFIX_MARK);
+next_buf:
m = sfc_ef10_essb_next_mbuf(rxq, m);
} while (todo_bufs-- > 0);
}
@@ -404,21 +411,45 @@ sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
static unsigned int
-sfc_ef10_essb_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
- /*
- * Correct implementation requires EvQ polling and events
- * processing.
- */
- return -ENOTSUP;
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+ efx_qword_t rx_ev;
+
+ if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
+ SFC_EF10_ESSB_RXQ_EXCEPTION)))
+ return rxq->bufs_pending;
+
+ while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
+ /*
+ * DROP_EVENT is internal to the NIC; software should
+ * never see it and, therefore, may ignore it.
+ */
+ sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
+ }
+
+ sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
+ evq_old_read_ptr, rxq->evq_read_ptr);
+
+ return rxq->bufs_pending;
}
static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
static int
-sfc_ef10_essb_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
- __rte_unused uint16_t offset)
+sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
- return -ENOTSUP;
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq);
+
+ if (offset < pending)
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (offset < (rxq->added - rxq->completed) * rxq->block_size +
+ rxq->left_in_completed - rxq->block_size)
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
}
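Applications reach these callbacks through the generic ethdev API; a minimal polling sketch (port_id, queue_id and offset are placeholders):

    switch (rte_eth_rx_descriptor_status(port_id, queue_id, offset)) {
    case RTE_ETH_RX_DESC_DONE:      /* a completed packet waits at this offset */
            break;
    case RTE_ETH_RX_DESC_AVAIL:     /* descriptor armed, nothing received yet */
            break;
    default:                        /* RTE_ETH_RX_DESC_UNAVAIL or an error */
            break;
    }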
static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
@@ -647,29 +678,20 @@ static void
sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
- unsigned int i, j;
+ unsigned int i;
const struct sfc_ef10_essb_rx_sw_desc *rxd;
struct rte_mbuf *m;
- if (rxq->completed != rxq->added && rxq->left_in_completed > 0) {
- rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
- m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
- rxq->block_size - rxq->left_in_completed);
- do {
- rxq->left_in_completed--;
- rte_mempool_put(rxq->refill_mb_pool, m);
- m = sfc_ef10_essb_next_mbuf(rxq, m);
- } while (rxq->left_in_completed > 0);
- rxq->completed++;
- }
-
for (i = rxq->completed; i != rxq->added; ++i) {
rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
- m = rxd->first_mbuf;
- for (j = 0; j < rxq->block_size; ++j) {
+ m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+ rxq->block_size - rxq->left_in_completed);
+ while (rxq->left_in_completed > 0) {
rte_mempool_put(rxq->refill_mb_pool, m);
m = sfc_ef10_essb_next_mbuf(rxq, m);
+ rxq->left_in_completed--;
}
+ rxq->left_in_completed = rxq->block_size;
}
rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
@@ -683,7 +705,8 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
},
.features = SFC_DP_RX_FEAT_FLOW_FLAG |
- SFC_DP_RX_FEAT_FLOW_MARK,
+ SFC_DP_RX_FEAT_FLOW_MARK |
+ SFC_DP_RX_FEAT_CHECKSUM,
.get_dev_info = sfc_ef10_essb_rx_get_dev_info,
.pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported,
.qsize_up_rings = sfc_ef10_essb_rx_qsize_up_rings,
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 42b35b9b..6a5052b9 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -658,7 +658,8 @@ struct sfc_dp_rx sfc_ef10_rx = {
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
.features = SFC_DP_RX_FEAT_MULTI_PROCESS |
- SFC_DP_RX_FEAT_TUNNELS,
+ SFC_DP_RX_FEAT_TUNNELS |
+ SFC_DP_RX_FEAT_CHECKSUM,
.get_dev_info = sfc_ef10_rx_get_dev_info,
.qsize_up_rings = sfc_ef10_rx_qsize_up_rings,
.qcreate = sfc_ef10_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_rx_ev.h b/drivers/net/sfc/sfc_ef10_rx_ev.h
index 615bd29b..868c755f 100644
--- a/drivers/net/sfc/sfc_ef10_rx_ev.h
+++ b/drivers/net/sfc/sfc_ef10_rx_ev.h
@@ -37,8 +37,10 @@ sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m,
if (unlikely(rx_ev.eq_u64[0] &
rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
(1ull << ESF_DZ_RX_ECRC_ERR_LBN) |
- (1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN))))
+ (1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))) {
+ /* Zero packet type is used as a marker to discard bad packets */
goto done;
+ }
#if SFC_EF10_RX_EV_ENCAP_SUPPORT
switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) {
@@ -120,6 +122,8 @@ sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m,
if (tun_ptype == 0)
l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
break;
+ case ESE_DZ_L3_CLASS_UNKNOWN:
+ break;
default:
/* Unexpected Layer 3 class */
SFC_ASSERT(false);
@@ -157,6 +161,8 @@ sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m,
SFC_ASSERT(false);
}
+ SFC_ASSERT(l2_ptype != 0);
+
done:
m->ol_flags = ol_flags & ol_mask;
m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype;
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 1b6499f8..9decbf5a 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -874,14 +874,12 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
}
/*
- * The driver does not use it, but other PMDs update jumbo_frame
+ * The driver does not use it, but other PMDs update the jumbo frame
* flag and max_rx_pkt_len when MTU is set.
*/
if (mtu > ETHER_MAX_LEN) {
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- rxmode->jumbo_frame = 1;
}
dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
@@ -1029,7 +1027,7 @@ sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
if (rc != 0)
sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
- SFC_ASSERT(rc > 0);
+ SFC_ASSERT(rc >= 0);
return -rc;
}
@@ -1057,9 +1055,7 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
qinfo->conf.rx_free_thresh = rxq->refill_threshold;
qinfo->conf.rx_drop_en = 1;
qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
- qinfo->conf.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
qinfo->scattered_rx = 1;
@@ -1089,7 +1085,6 @@ sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
memset(qinfo, 0, sizeof(*qinfo));
- qinfo->conf.txq_flags = txq_info->txq->flags;
qinfo->conf.offloads = txq_info->txq->offloads;
qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
qinfo->conf.tx_deferred_start = txq_info->deferred_start;
@@ -2097,9 +2092,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");
-RTE_INIT(sfc_driver_register_logtype);
-static void
-sfc_driver_register_logtype(void)
+RTE_INIT(sfc_driver_register_logtype)
{
int ret;
diff --git a/drivers/net/sfc/sfc_filter.c b/drivers/net/sfc/sfc_filter.c
index 77e2ea56..6ff380a3 100644
--- a/drivers/net/sfc/sfc_filter.c
+++ b/drivers/net/sfc/sfc_filter.c
@@ -75,6 +75,7 @@ int
sfc_filter_attach(struct sfc_adapter *sa)
{
int rc;
+ unsigned int i;
sfc_log_init(sa, "entry");
@@ -88,6 +89,19 @@ sfc_filter_attach(struct sfc_adapter *sa)
efx_filter_fini(sa->nic);
+ sa->filter.supports_ip_proto_or_addr_filter = B_FALSE;
+ sa->filter.supports_rem_or_local_port_filter = B_FALSE;
+ for (i = 0; i < sa->filter.supported_match_num; ++i) {
+ if (sa->filter.supported_match[i] &
+ (EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST |
+ EFX_FILTER_MATCH_REM_HOST))
+ sa->filter.supports_ip_proto_or_addr_filter = B_TRUE;
+
+ if (sa->filter.supported_match[i] &
+ (EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))
+ sa->filter.supports_rem_or_local_port_filter = B_TRUE;
+ }
+
sfc_log_init(sa, "done");
return 0;
diff --git a/drivers/net/sfc/sfc_filter.h b/drivers/net/sfc/sfc_filter.h
index d3e1c2f9..64ab114e 100644
--- a/drivers/net/sfc/sfc_filter.h
+++ b/drivers/net/sfc/sfc_filter.h
@@ -25,6 +25,16 @@ struct sfc_filter {
uint32_t *supported_match;
/** List of flow rules */
struct sfc_flow_list flow_list;
+ /**
+ * Supports any of ip_proto, remote host or local host
+ * filters. This flag is used for filter match exceptions.
+ */
+ boolean_t supports_ip_proto_or_addr_filter;
+ /**
+ * Supports any of remote port or local port filters.
+ * This flag is used for filter match exceptions.
+ */
+ boolean_t supports_rem_or_local_port_filter;
};
struct sfc_adapter;
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 5613d59a..371648b0 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -93,6 +93,8 @@ static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
+static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
+static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
@@ -371,7 +373,8 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
* the outer tag and the next matches the inner tag.
*/
if (mask->tci == supp_mask.tci) {
- vid = rte_bswap16(spec->tci);
+ /* Apply mask to keep VID only */
+ vid = rte_bswap16(spec->tci & mask->tci);
if (!(efx_spec->efs_match_flags &
EFX_FILTER_MATCH_OUTER_VID)) {
@@ -1780,6 +1783,43 @@ sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
}
/**
+ * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
+ * in the same specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ * How many specifications should have the same match flag; this equals
+ * the number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+
+ if (filters_count_for_one_val != spec->count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect "
+ "while copying by outer VLAN ID");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ spec->filters[i].efs_match_flags |=
+ EFX_FILTER_MATCH_OUTER_VID;
+
+ spec->filters[i].efs_outer_vid = 0;
+ }
+
+ return 0;
+}
+
+/**
* Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
* EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
* specifications after copying.
@@ -1858,6 +1898,36 @@ sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
return B_FALSE;
}
+/**
+ * Check that the list of supported filters has a filter that differs
+ * from @p match in that it has no EFX_FILTER_MATCH_OUTER_VID flag;
+ * in this case that filter will be used and the flag
+ * EFX_FILTER_MATCH_OUTER_VID is not needed.
+ *
+ * @param match[in]
+ * The match flags of filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ */
+static boolean_t
+sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
+ __rte_unused efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_filter_match_flags_t match_without_vid =
+ match & ~EFX_FILTER_MATCH_OUTER_VID;
+
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if (match_without_vid == filter->supported_match[i])
+ return B_FALSE;
+ }
+
+ return B_TRUE;
+}
+
/*
* Match flags that can be automatically added to filters.
* Selecting the last minimum when searching for the copy flag ensures that the
@@ -1885,6 +1955,12 @@ static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
},
+ {
+ .flag = EFX_FILTER_MATCH_OUTER_VID,
+ .vals_count = 1,
+ .set_vals = sfc_flow_set_outer_vid_flag,
+ .spec_check = sfc_flow_check_outer_vid_flag,
+ },
};
/* Get item from array sfc_flow_copy_flags */
@@ -2094,11 +2170,14 @@ sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
* Check whether the spec maps to a hardware filter which is known to be
* ineffective despite being valid.
*
+ * @param filter[in]
+ * SFC filter with list of supported filters.
* @param spec[in]
* SFC flow specification.
*/
static boolean_t
-sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
+sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
+ struct sfc_flow_spec *spec)
{
unsigned int i;
uint16_t ether_type;
@@ -2114,8 +2193,9 @@ sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
EFX_FILTER_MATCH_ETHER_TYPE |
EFX_FILTER_MATCH_LOC_MAC)) {
ether_type = spec->filters[i].efs_ether_type;
- if (ether_type == EFX_ETHER_TYPE_IPV4 ||
- ether_type == EFX_ETHER_TYPE_IPV6)
+ if (filter->supports_ip_proto_or_addr_filter &&
+ (ether_type == EFX_ETHER_TYPE_IPV4 ||
+ ether_type == EFX_ETHER_TYPE_IPV6))
return B_TRUE;
} else if (sfc_flow_is_match_with_vids(match_flags,
EFX_FILTER_MATCH_ETHER_TYPE |
@@ -2125,8 +2205,9 @@ sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_MAC)) {
ip_proto = spec->filters[i].efs_ip_proto;
- if (ip_proto == EFX_IPPROTO_TCP ||
- ip_proto == EFX_IPPROTO_UDP)
+ if (filter->supports_rem_or_local_port_filter &&
+ (ip_proto == EFX_IPPROTO_TCP ||
+ ip_proto == EFX_IPPROTO_UDP))
return B_TRUE;
}
}
@@ -2153,7 +2234,7 @@ sfc_flow_validate_match_flags(struct sfc_adapter *sa,
return rc;
}
- if (sfc_flow_is_match_flags_exception(&flow->spec)) {
+ if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"The flow rule pattern is unsupported");
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index cc76a5b1..d8503e20 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -517,7 +517,8 @@ struct sfc_dp_rx sfc_efx_rx = {
.type = SFC_DP_RX,
.hw_fw_caps = 0,
},
- .features = SFC_DP_RX_FEAT_SCATTER,
+ .features = SFC_DP_RX_FEAT_SCATTER |
+ SFC_DP_RX_FEAT_CHECKSUM,
.qsize_up_rings = sfc_efx_rx_qsize_up_rings,
.qcreate = sfc_efx_rx_qcreate,
.qdestroy = sfc_efx_rx_qdestroy,
@@ -792,9 +793,12 @@ sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
caps |= DEV_RX_OFFLOAD_CRC_STRIP;
- caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
- caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
- caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ if (sa->dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
+ caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+ caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
+ caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
+ }
if (encp->enc_tunnel_encapsulations_supported &&
(sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
@@ -817,10 +821,8 @@ sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
const struct rte_eth_rxconf *rx_conf,
- uint64_t offloads)
+ __rte_unused uint64_t offloads)
{
- uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
- sfc_rx_get_queue_offload_caps(sa);
int rc = 0;
if (rx_conf->rx_thresh.pthresh != 0 ||
@@ -842,14 +844,6 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
rc = EINVAL;
}
- if ((offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
- DEV_RX_OFFLOAD_CHECKSUM)
- sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
-
- if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
- (~offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
- sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
-
return rc;
}
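With the Rx checksum capability now gated on the datapath feature flag, an application should consult the advertised capabilities before requesting the offloads; a sketch (port_id and port_conf are placeholders):

    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(port_id, &dev_info);
    if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM) ==
        DEV_RX_OFFLOAD_CHECKSUM)
            port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;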
@@ -1424,6 +1418,8 @@ sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
+ uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
+ sfc_rx_get_queue_offload_caps(sa);
struct sfc_rss *rss = &sa->rss;
int rc = 0;
@@ -1443,10 +1439,29 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
rc = EINVAL;
}
- if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+ /* The KEEP_CRC offload flag is not supported by the PMD.
+ * This block can be removed when DEV_RX_OFFLOAD_CRC_STRIP is removed.
+ */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
sfc_warn(sa, "FCS stripping cannot be disabled - always on");
rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- rxmode->hw_strip_crc = 1;
+ }
+
+ /*
+ * Requested offloads are validated by ethdev against the supported
+ * set, so the check below cannot add an unsupported offload.
+ */
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+ (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+ sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
+ rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ }
+
+ if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+ (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+ sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
+ rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
}
return rc;
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 1bcc2c69..6d42a1a6 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -171,7 +171,6 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
txq->free_thresh =
(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
SFC_TX_DEFAULT_FREE_THRESH;
- txq->flags = tx_conf->txq_flags;
txq->offloads = offloads;
rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
@@ -182,7 +181,6 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
memset(&info, 0, sizeof(info));
info.max_fill_level = txq_max_fill_level;
info.free_thresh = txq->free_thresh;
- info.flags = tx_conf->txq_flags;
info.offloads = offloads;
info.txq_entries = txq_info->entries;
info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
@@ -431,18 +429,10 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
if (rc != 0)
goto fail_ev_qstart;
- /*
- * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
- * application which expects that IPv4 checksum offload is enabled
- * all the time as there is no legacy flag to turn off the offload.
- */
- if ((txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ||
- (~txq->flags & ETH_TXQ_FLAGS_IGNORE))
+ if (txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_IPV4;
- if ((txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
- ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
- (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)))
+ if (txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_INNER_IPV4;
if ((txq->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
@@ -453,16 +443,7 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
}
- /*
- * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
- * application. In turn, the absence of ETH_TXQ_FLAGS_NOXSUMTCP is
- * associated specifically with a legacy application which expects
- * both TCP checksum offload and TSO to be enabled because the legacy
- * API does not provide a dedicated mechanism to control TSO.
- */
- if ((txq->offloads & DEV_TX_OFFLOAD_TCP_TSO) ||
- ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
- (~txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP)))
+ if (txq->offloads & DEV_TX_OFFLOAD_TCP_TSO)
flags |= EFX_TXQ_FATSOV2;
rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index c2e5f13e..146b805c 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -58,7 +58,6 @@ struct sfc_txq {
struct sfc_dp_txq *dp;
efx_txq_t *common;
unsigned int free_thresh;
- unsigned int flags;
uint64_t offloads;
};
diff --git a/drivers/net/softnic/Makefile b/drivers/net/softnic/Makefile
index d56fecd6..ea9b65f4 100644
--- a/drivers/net/softnic/Makefile
+++ b/drivers/net/softnic/Makefile
@@ -8,13 +8,15 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_softnic.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_pipeline -lrte_port -lrte_table
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_sched
LDLIBS += -lrte_bus_vdev
-EXPORT_MAP := rte_pmd_eth_softnic_version.map
+EXPORT_MAP := rte_pmd_softnic_version.map
LIBABIVER := 1
@@ -22,11 +24,30 @@ LIBABIVER := 1
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_mempool.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_swq.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_link.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tap.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_action.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_pipeline.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_thread.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cli.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += parser.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += conn.c
#
# Export include files
#
SYMLINK-y-include += rte_eth_softnic.h
+ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+$(info Softnic PMD can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+all:
+clean:
+else
+
include $(RTE_SDK)/mk/rte.lib.mk
+
+endif
diff --git a/drivers/net/softnic/conn.c b/drivers/net/softnic/conn.c
new file mode 100644
index 00000000..990cf40f
--- /dev/null
+++ b/drivers/net/softnic/conn.c
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+
+#define __USE_GNU
+#include <sys/socket.h>
+
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <errno.h>
+
+#include "conn.h"
+
+#define MSG_CMD_TOO_LONG "Command too long."
+
+struct softnic_conn {
+ char *welcome;
+ char *prompt;
+ char *buf;
+ char *msg_in;
+ char *msg_out;
+ size_t buf_size;
+ size_t msg_in_len_max;
+ size_t msg_out_len_max;
+ size_t msg_in_len;
+ int fd_server;
+ int fd_client_group;
+ softnic_conn_msg_handle_t msg_handle;
+ void *msg_handle_arg;
+};
+
+struct softnic_conn *
+softnic_conn_init(struct softnic_conn_params *p)
+{
+ struct sockaddr_in server_address;
+ struct softnic_conn *conn;
+ int fd_server, fd_client_group, status;
+
+ memset(&server_address, 0, sizeof(server_address));
+
+ /* Check input arguments */
+ if (p == NULL ||
+ p->welcome == NULL ||
+ p->prompt == NULL ||
+ p->addr == NULL ||
+ p->buf_size == 0 ||
+ p->msg_in_len_max == 0 ||
+ p->msg_out_len_max == 0 ||
+ p->msg_handle == NULL)
+ return NULL;
+
+ status = inet_aton(p->addr, &server_address.sin_addr);
+ if (status == 0)
+ return NULL;
+
+ /* Memory allocation */
+ conn = calloc(1, sizeof(struct softnic_conn));
+ if (conn == NULL)
+ return NULL;
+
+ conn->welcome = calloc(1, CONN_WELCOME_LEN_MAX + 1);
+ conn->prompt = calloc(1, CONN_PROMPT_LEN_MAX + 1);
+ conn->buf = calloc(1, p->buf_size);
+ conn->msg_in = calloc(1, p->msg_in_len_max + 1);
+ conn->msg_out = calloc(1, p->msg_out_len_max + 1);
+
+ if (conn->welcome == NULL ||
+ conn->prompt == NULL ||
+ conn->buf == NULL ||
+ conn->msg_in == NULL ||
+ conn->msg_out == NULL) {
+ softnic_conn_free(conn);
+ return NULL;
+ }
+
+ /* Server socket */
+ server_address.sin_family = AF_INET;
+ server_address.sin_port = htons(p->port);
+
+ fd_server = socket(AF_INET,
+ SOCK_STREAM | SOCK_NONBLOCK,
+ 0);
+ if (fd_server == -1) {
+ softnic_conn_free(conn);
+ return NULL;
+ }
+
+ status = bind(fd_server,
+ (struct sockaddr *)&server_address,
+ sizeof(server_address));
+ if (status == -1) {
+ softnic_conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ status = listen(fd_server, 16);
+ if (status == -1) {
+ softnic_conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ /* Client group */
+ fd_client_group = epoll_create(1);
+ if (fd_client_group == -1) {
+ softnic_conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ /* Fill in */
+ strncpy(conn->welcome, p->welcome, CONN_WELCOME_LEN_MAX);
+ strncpy(conn->prompt, p->prompt, CONN_PROMPT_LEN_MAX);
+ conn->buf_size = p->buf_size;
+ conn->msg_in_len_max = p->msg_in_len_max;
+ conn->msg_out_len_max = p->msg_out_len_max;
+ conn->msg_in_len = 0;
+ conn->fd_server = fd_server;
+ conn->fd_client_group = fd_client_group;
+ conn->msg_handle = p->msg_handle;
+ conn->msg_handle_arg = p->msg_handle_arg;
+
+ return conn;
+}
+
+void
+softnic_conn_free(struct softnic_conn *conn)
+{
+ if (conn == NULL)
+ return;
+
+ if (conn->fd_client_group)
+ close(conn->fd_client_group);
+
+ if (conn->fd_server)
+ close(conn->fd_server);
+
+ free(conn->msg_out);
+ free(conn->msg_in);
+ free(conn->prompt);
+ free(conn->welcome);
+ free(conn);
+}
+
+int
+softnic_conn_poll_for_conn(struct softnic_conn *conn)
+{
+ struct sockaddr_in client_address;
+ struct epoll_event event;
+ socklen_t client_address_length;
+ int fd_client, status;
+
+ /* Check input arguments */
+ if (conn == NULL)
+ return -1;
+
+ /* Server socket */
+ client_address_length = sizeof(client_address);
+ fd_client = accept4(conn->fd_server,
+ (struct sockaddr *)&client_address,
+ &client_address_length,
+ SOCK_NONBLOCK);
+ if (fd_client == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ return 0;
+
+ return -1;
+ }
+
+ /* Client group */
+ event.events = EPOLLIN | EPOLLRDHUP | EPOLLHUP;
+ event.data.fd = fd_client;
+
+ status = epoll_ctl(conn->fd_client_group,
+ EPOLL_CTL_ADD,
+ fd_client,
+ &event);
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ /* Client */
+ status = write(fd_client,
+ conn->welcome,
+ strlen(conn->welcome));
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ status = write(fd_client,
+ conn->prompt,
+ strlen(conn->prompt));
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+data_event_handle(struct softnic_conn *conn,
+ int fd_client)
+{
+ ssize_t len, i, status;
+
+ /* Read input message */
+
+ len = read(fd_client,
+ conn->buf,
+ conn->buf_size);
+ if (len == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ return 0;
+
+ return -1;
+ }
+ if (len == 0)
+ return 0;
+
+ /* Handle input messages */
+ for (i = 0; i < len; i++) {
+ if (conn->buf[i] == '\n') {
+ size_t n;
+
+ conn->msg_in[conn->msg_in_len] = 0;
+ conn->msg_out[0] = 0;
+
+ conn->msg_handle(conn->msg_in,
+ conn->msg_out,
+ conn->msg_out_len_max,
+ conn->msg_handle_arg);
+
+ n = strlen(conn->msg_out);
+ if (n) {
+ status = write(fd_client,
+ conn->msg_out,
+ n);
+ if (status == -1)
+ return status;
+ }
+
+ conn->msg_in_len = 0;
+ } else if (conn->msg_in_len < conn->msg_in_len_max) {
+ conn->msg_in[conn->msg_in_len] = conn->buf[i];
+ conn->msg_in_len++;
+ } else {
+ status = write(fd_client,
+ MSG_CMD_TOO_LONG,
+ strlen(MSG_CMD_TOO_LONG));
+ if (status == -1)
+ return status;
+
+ conn->msg_in_len = 0;
+ }
+ }
+
+ /* Write prompt */
+ status = write(fd_client,
+ conn->prompt,
+ strlen(conn->prompt));
+ if (status == -1)
+ return status;
+
+ return 0;
+}
+
+static int
+control_event_handle(struct softnic_conn *conn,
+ int fd_client)
+{
+ int status;
+
+ status = epoll_ctl(conn->fd_client_group,
+ EPOLL_CTL_DEL,
+ fd_client,
+ NULL);
+ if (status == -1)
+ return -1;
+
+ status = close(fd_client);
+ if (status == -1)
+ return -1;
+
+ return 0;
+}
+
+int
+softnic_conn_poll_for_msg(struct softnic_conn *conn)
+{
+ struct epoll_event event;
+ int fd_client, status, status_data = 0, status_control = 0;
+
+ /* Check input arguments */
+ if (conn == NULL)
+ return -1;
+
+ /* Client group */
+ status = epoll_wait(conn->fd_client_group,
+ &event,
+ 1,
+ 0);
+ if (status == -1)
+ return -1;
+ if (status == 0)
+ return 0;
+
+ fd_client = event.data.fd;
+
+ /* Data available */
+ if (event.events & EPOLLIN)
+ status_data = data_event_handle(conn, fd_client);
+
+ /* Control events */
+ if (event.events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP))
+ status_control = control_event_handle(conn, fd_client);
+
+ if (status_data || status_control)
+ return -1;
+
+ return 0;
+}
diff --git a/drivers/net/softnic/conn.h b/drivers/net/softnic/conn.h
new file mode 100644
index 00000000..631edeef
--- /dev/null
+++ b/drivers/net/softnic/conn.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_CONN_H__
+#define __INCLUDE_CONN_H__
+
+#include <stdint.h>
+
+struct softnic_conn;
+
+#ifndef CONN_WELCOME_LEN_MAX
+#define CONN_WELCOME_LEN_MAX 1024
+#endif
+
+#ifndef CONN_PROMPT_LEN_MAX
+#define CONN_PROMPT_LEN_MAX 16
+#endif
+
+typedef void (*softnic_conn_msg_handle_t)(char *msg_in,
+ char *msg_out,
+ size_t msg_out_len_max,
+ void *arg);
+
+struct softnic_conn_params {
+ const char *welcome;
+ const char *prompt;
+ const char *addr;
+ uint16_t port;
+ size_t buf_size;
+ size_t msg_in_len_max;
+ size_t msg_out_len_max;
+ softnic_conn_msg_handle_t msg_handle;
+ void *msg_handle_arg;
+};
+
+struct softnic_conn *
+softnic_conn_init(struct softnic_conn_params *p);
+
+void
+softnic_conn_free(struct softnic_conn *conn);
+
+int
+softnic_conn_poll_for_conn(struct softnic_conn *conn);
+
+int
+softnic_conn_poll_for_msg(struct softnic_conn *conn);
+
+#endif
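A minimal usage sketch of this API (address, port and buffer sizes are illustrative; error handling mostly omitted):

    #include <stdio.h>
    #include "conn.h"

    static void
    echo_handle(char *msg_in, char *msg_out, size_t msg_out_len_max, void *arg)
    {
            (void)arg;
            snprintf(msg_out, msg_out_len_max, "echo: %s\n", msg_in);
    }

    int main(void)
    {
            struct softnic_conn_params params = {
                    .welcome = "Welcome!\n",
                    .prompt = "softnic> ",
                    .addr = "0.0.0.0",
                    .port = 8086,
                    .buf_size = 1024,
                    .msg_in_len_max = 1024,
                    .msg_out_len_max = 1024,
                    .msg_handle = echo_handle,
                    .msg_handle_arg = NULL,
            };
            struct softnic_conn *conn = softnic_conn_init(&params);

            if (conn == NULL)
                    return 1;
            for ( ; ; ) {   /* typically polled from a service loop */
                    softnic_conn_poll_for_conn(conn);
                    softnic_conn_poll_for_msg(conn);
            }
    }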
diff --git a/drivers/net/softnic/firmware.cli b/drivers/net/softnic/firmware.cli
new file mode 100644
index 00000000..300cf6e3
--- /dev/null
+++ b/drivers/net/softnic/firmware.cli
@@ -0,0 +1,21 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2018 Intel Corporation
+
+link LINK dev 0000:02:00.0
+
+pipeline RX period 10 offset_port_id 0
+pipeline RX port in bsz 32 link LINK rxq 0
+pipeline RX port out bsz 32 swq RXQ0
+pipeline RX table match stub
+pipeline RX port in 0 table 0
+pipeline RX table 0 rule add match default action fwd port 0
+
+pipeline TX period 10 offset_port_id 0
+pipeline TX port in bsz 32 swq TXQ0
+pipeline TX port out bsz 32 link LINK txq 0
+pipeline TX table match stub
+pipeline TX port in 0 table 0
+pipeline TX table 0 rule add match default action fwd port 0
+
+thread 1 pipeline RX enable
+thread 1 pipeline TX enable
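For reference: this sample builds two stub-table pipelines bound to thread 1. RX forwards every packet from LINK rxq 0 into swq RXQ0 via a default rule, and TX forwards from swq TXQ0 out through LINK txq 0; the PCI address 0000:02:00.0 is only an example.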
diff --git a/drivers/net/softnic/hash_func.h b/drivers/net/softnic/hash_func.h
new file mode 100644
index 00000000..198d2b20
--- /dev/null
+++ b/drivers/net/softnic/hash_func.h
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_HASH_FUNC_H__
+#define __INCLUDE_HASH_FUNC_H__
+
+#include <rte_common.h>
+
+static inline uint64_t
+hash_xor_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = seed ^ (k[0] & m[0]);
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+
+ xor0 ^= k[2] & m[2];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+
+ xor0 ^= xor1;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+
+ xor0 ^= xor1;
+
+ xor0 ^= k[4] & m[4];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+
+ xor0 ^= xor1;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+
+ xor0 ^= xor1;
+ xor2 ^= k[6] & m[6];
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2, xor3;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+ xor3 = (k[6] & m[6]) ^ (k[7] & m[7]);
+
+ xor0 ^= xor1;
+ xor2 ^= xor3;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+#if defined(RTE_ARCH_X86_64)
+
+#include <x86intrin.h>
+
+static inline uint64_t
+hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t crc0;
+
+ crc0 = _mm_crc32_u64(seed, k[0] & m[0]);
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, crc0, crc1;
+
+ k0 = k[0] & m[0];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, crc0, crc1;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc0 = _mm_crc32_u64(crc0, k2);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, k5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
+ crc5 = k5 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
+ crc5 = _mm_crc32_u64(k5 >> 32, k[7] & m[7]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+#define hash_default_key8 hash_crc_key8
+#define hash_default_key16 hash_crc_key16
+#define hash_default_key24 hash_crc_key24
+#define hash_default_key32 hash_crc_key32
+#define hash_default_key40 hash_crc_key40
+#define hash_default_key48 hash_crc_key48
+#define hash_default_key56 hash_crc_key56
+#define hash_default_key64 hash_crc_key64
+
+#elif defined(RTE_ARCH_ARM64)
+#include "hash_func_arm64.h"
+#else
+
+#define hash_default_key8 hash_xor_key8
+#define hash_default_key16 hash_xor_key16
+#define hash_default_key24 hash_xor_key24
+#define hash_default_key32 hash_xor_key32
+#define hash_default_key40 hash_xor_key40
+#define hash_default_key48 hash_xor_key48
+#define hash_default_key56 hash_xor_key56
+#define hash_default_key64 hash_xor_key64
+
+#endif
+
+#endif
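A small usage sketch (key and mask values are illustrative); the mask is applied before hashing, so wildcarded key bits do not affect the signature:

    uint64_t key[2]  = { 0x0123456789abcdefULL, 0x1122334455667788ULL };
    uint64_t mask[2] = { UINT64_MAX, 0xffffffff00000000ULL };
    uint64_t sig;

    sig = hash_default_key16(key, mask, 16 /* key size in bytes */, 0 /* seed */);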
diff --git a/drivers/net/softnic/hash_func_arm64.h b/drivers/net/softnic/hash_func_arm64.h
new file mode 100644
index 00000000..ae6c0f41
--- /dev/null
+++ b/drivers/net/softnic/hash_func_arm64.h
@@ -0,0 +1,261 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Linaro Limited. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __HASH_FUNC_ARM64_H__
+#define __HASH_FUNC_ARM64_H__
+
+#define _CRC32CX(crc, val) \
+ __asm__("crc32cx %w[c], %w[c], %x[v]":[c] "+r" (crc):[v] "r" (val))
+
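+/*
+ * _CRC32CX() folds one 64-bit word into a 32-bit CRC32-C accumulator via
+ * the ARMv8 crc32cx instruction, which presumably requires building with
+ * the CRC extension enabled (e.g. -march=armv8-a+crc). A minimal usage
+ * sketch:
+ *
+ *	uint32_t crc = 0;
+ *	_CRC32CX(crc, 0x0123456789abcdefULL);
+ *	// crc now holds the CRC32-C of that single 64-bit word
+ */
+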
+static inline uint64_t
+hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint32_t crc0;
+
+ crc0 = seed;
+ _CRC32CX(crc0, k[0] & m[0]);
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1;
+
+ k0 = k[0] & m[0];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ _CRC32CX(crc0, k2);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+
+ _CRC32CX(crc0, crc1);
+ _CRC32CX(crc2, crc3);
+
+ crc0 ^= crc2;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ _CRC32CX(crc0, crc1);
+ _CRC32CX(crc2, crc3);
+
+ crc0 ^= crc2;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, k5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ crc4 = k5;
+ _CRC32CX(crc4, k[6] & m[6]);
+ crc5 = k5 >> 32;
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ crc4 = k5;
+ _CRC32CX(crc4, k[6] & m[6]);
+ crc5 = k5 >> 32;
+ _CRC32CX(crc5, k[7] & m[7]);
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+#define hash_default_key8 hash_crc_key8
+#define hash_default_key16 hash_crc_key16
+#define hash_default_key24 hash_crc_key24
+#define hash_default_key32 hash_crc_key32
+#define hash_default_key40 hash_crc_key40
+#define hash_default_key48 hash_crc_key48
+#define hash_default_key56 hash_crc_key56
+#define hash_default_key64 hash_crc_key64
+
+#endif
diff --git a/drivers/net/softnic/meson.build b/drivers/net/softnic/meson.build
new file mode 100644
index 00000000..ff982274
--- /dev/null
+++ b/drivers/net/softnic/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+install_headers('rte_eth_softnic.h')
+sources = files('rte_eth_softnic_tm.c',
+ 'rte_eth_softnic.c',
+ 'rte_eth_softnic_mempool.c',
+ 'rte_eth_softnic_swq.c',
+ 'rte_eth_softnic_link.c',
+ 'rte_eth_softnic_tap.c',
+ 'rte_eth_softnic_action.c',
+ 'rte_eth_softnic_pipeline.c',
+ 'rte_eth_softnic_thread.c',
+ 'rte_eth_softnic_cli.c',
+ 'parser.c',
+ 'conn.c')
+deps += ['pipeline', 'port', 'table', 'sched']
diff --git a/drivers/net/softnic/parser.c b/drivers/net/softnic/parser.c
new file mode 100644
index 00000000..a8688a21
--- /dev/null
+++ b/drivers/net/softnic/parser.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation.
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ */
+
+/* For inet_pton4() and inet_pton6() functions:
+ *
+ * Copyright (c) 1996 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
+ * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
+ * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#include <rte_errno.h>
+
+#include "parser.h"
+
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
+int
+softnic_parser_read_arg_bool(const char *p)
+{
+ p = skip_white_spaces(p);
+ int result = -EINVAL;
+
+ if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
+ ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
+ p += 3;
+ result = 1;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'n')) ||
+ ((p[0] == 'O') && (p[1] == 'N'))) {
+ p += 2;
+ result = 1;
+ }
+
+ if (((p[0] == 'n') && (p[1] == 'o')) ||
+ ((p[0] == 'N') && (p[1] == 'O'))) {
+ p += 2;
+ result = 0;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
+ ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
+ p += 3;
+ result = 0;
+ }
+
+ p = skip_white_spaces(p);
+
+ if (p[0] != '\0')
+ return -EINVAL;
+
+ return result;
+}
+
+int
+softnic_parser_read_int32(int32_t *value, const char *p)
+{
+ char *next;
+ int32_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtol(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtoull(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
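+/*
+ * The switch above intentionally falls through, so each suffix multiplies
+ * by a further 1024: "K" scales by 2^10, "M" by 2^20, "G" by 2^30 and "T"
+ * by 2^40. For example:
+ *
+ *	uint64_t v;
+ *	softnic_parser_read_uint64(&v, "16K");	// v == 16384
+ *	softnic_parser_read_uint64(&v, "2M");	// v == 2097152
+ */
+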
+int
+softnic_parser_read_uint64_hex(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+
+ val = strtoull(p, &next, 16);
+ if (p == next)
+ return -EINVAL;
+
+ p = skip_white_spaces(next);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint32_hex(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint16(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint16_hex(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint8(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint8_hex(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens)
+{
+ uint32_t i;
+
+ if (string == NULL ||
+ tokens == NULL ||
+ (*n_tokens < 1))
+ return -EINVAL;
+
+ for (i = 0; i < *n_tokens; i++) {
+ tokens[i] = strtok_r(string, PARSE_DELIMITER, &string);
+ if (tokens[i] == NULL)
+ break;
+ }
+
+ if (i == *n_tokens &&
+ strtok_r(string, PARSE_DELIMITER, &string) != NULL)
+ return -E2BIG;
+
+ *n_tokens = i;
+ return 0;
+}
+
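+/*
+ * A minimal usage sketch for the tokenizer above; the input buffer is cut
+ * in place and *n_tokens is updated to the number of tokens found:
+ *
+ *	char line[] = "mempool MEMPOOL0 buffer 2304";
+ *	char *tokens[16];
+ *	uint32_t n_tokens = RTE_DIM(tokens);
+ *
+ *	if (softnic_parse_tokenize_string(line, tokens, &n_tokens) == 0) {
+ *		// n_tokens == 4, tokens[0] == "mempool", tokens[3] == "2304"
+ *	}
+ */
+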
+int
+softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if (src == NULL ||
+ dst == NULL ||
+ size == NULL ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
+
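+/*
+ * Note that the length check above accepts only whole multiples of four
+ * hex digits. For example, with dst[8] and *size == 8, "DEADBEEF" yields
+ * the four bytes {0xde, 0xad, 0xbe, 0xef} and *size is set to 4.
+ */
+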
+int
+softnic_parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels)
+{
+ uint32_t n_max_labels = *n_labels, count = 0;
+
+ /* Check for void list of labels */
+ if (strcmp(string, "<void>") == 0) {
+ *n_labels = 0;
+ return 0;
+ }
+
+ /* At least one label should be present */
+ for ( ; (*string != '\0'); ) {
+ char *next;
+ int value;
+
+ if (count >= n_max_labels)
+ return -1;
+
+ if (count > 0) {
+ if (string[0] != ':')
+ return -1;
+
+ string++;
+ }
+
+ value = strtol(string, &next, 10);
+ if (next == string)
+ return -1;
+ string = next;
+
+ labels[count++] = (uint32_t)value;
+ }
+
+ *n_labels = count;
+ return 0;
+}
+
+#define INADDRSZ 4
+#define IN6ADDRSZ 16
+
+/* int
+ * inet_pton4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr(digits, ch);
+ if (pch != NULL) {
+ unsigned int new = *tp * 10 + (pch - digits);
+
+ if (new > 255)
+ return 0;
+ if (!saw_digit) {
+ if (++octets > 4)
+ return 0;
+ saw_digit = 1;
+ }
+ *tp = (unsigned char)new;
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return 0;
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return 0;
+ }
+ if (octets < 4)
+ return 0;
+
+ memcpy(dst, tmp, INADDRSZ);
+ return 1;
+}
+
+/* int
+ * inet_pton6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton6(const char *src, unsigned char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
+ const char *xdigits = 0, *curtok = 0;
+ int ch = 0, saw_xdigit = 0, count_xdigit = 0;
+ unsigned int val = 0;
+ unsigned int dbloct_count = 0;
+
+ memset((tp = tmp), '\0', IN6ADDRSZ);
+ endp = tp + IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return 0;
+ curtok = src;
+ saw_xdigit = count_xdigit = 0;
+ val = 0;
+
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr((xdigits = xdigits_l), ch);
+ if (pch == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ if (count_xdigit >= 4)
+ return 0;
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return 0;
+ saw_xdigit = 1;
+ count_xdigit++;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!saw_xdigit) {
+ if (colonp)
+ return 0;
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return 0;
+ }
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ saw_xdigit = 0;
+ count_xdigit = 0;
+ val = 0;
+ dbloct_count++;
+ continue;
+ }
+ if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
+ inet_pton4(curtok, tp) > 0) {
+ tp += INADDRSZ;
+ saw_xdigit = 0;
+ dbloct_count += 2;
+ break; /* '\0' was seen by inet_pton4(). */
+ }
+ return 0;
+ }
+ if (saw_xdigit) {
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ dbloct_count++;
+ }
+ if (colonp != NULL) {
+ /* if we already have 8 double octets, having a colon means error */
+ if (dbloct_count == 8)
+ return 0;
+
+ /* Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ for (i = 1; i <= n; i++) {
+ endp[-i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return 0;
+ memcpy(dst, tmp, IN6ADDRSZ);
+ return 1;
+}
+
+static struct ether_addr *
+my_ether_aton(const char *a)
+{
+ int i;
+ char *end;
+ unsigned long o[ETHER_ADDR_LEN];
+ static struct ether_addr ether_addr;
+
+ i = 0;
+ do {
+ errno = 0;
+ o[i] = strtoul(a, &end, 16);
+ if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
+ return NULL;
+ a = end + 1;
+ } while (++i != sizeof(o) / sizeof(o[0]) && end[0] != 0);
+
+ /* Junk at the end of line */
+ if (end[0] != 0)
+ return NULL;
+
+ /* Support the format XX:XX:XX:XX:XX:XX */
+ if (i == ETHER_ADDR_LEN) {
+ while (i-- != 0) {
+ if (o[i] > UINT8_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i] = (uint8_t)o[i];
+ }
+ /* Support the format XXXX:XXXX:XXXX */
+ } else if (i == ETHER_ADDR_LEN / 2) {
+ while (i-- != 0) {
+ if (o[i] > UINT16_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i * 2] = (uint8_t)(o[i] >> 8);
+ ether_addr.addr_bytes[i * 2 + 1] = (uint8_t)(o[i] & 0xff);
+ }
+ /* unknown format */
+ } else
+ return NULL;
+
+ return (struct ether_addr *)&ether_addr;
+}
+
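+/*
+ * my_ether_aton() therefore accepts "aa:bb:cc:dd:ee:ff" (six 1-byte
+ * groups) or "aabb:ccdd:eeff" (three 2-byte groups). The result points to
+ * a static buffer, so it is overwritten by the next call and is not
+ * thread-safe.
+ */
+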
+int
+softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4)
+{
+ if (strlen(token) >= INET_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton4(token, (unsigned char *)ipv4) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6)
+{
+ if (strlen(token) >= INET6_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton6(token, (unsigned char *)ipv6) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+softnic_parse_mac_addr(const char *token, struct ether_addr *addr)
+{
+ struct ether_addr *tmp;
+
+ tmp = my_ether_aton(token);
+ if (tmp == NULL)
+ return -1;
+
+ memcpy(addr, tmp, sizeof(struct ether_addr));
+ return 0;
+}
+
+int
+softnic_parse_cpu_core(const char *entry,
+ struct softnic_cpu_core_params *p)
+{
+ size_t num_len;
+ char num[8];
+
+ uint32_t s = 0, c = 0, h = 0, val;
+ uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0;
+ const char *next = skip_white_spaces(entry);
+ char type;
+
+ if (p == NULL)
+ return -EINVAL;
+
+ /* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */
+ while (*next != '\0') {
+ /* If everything is parsed, nothing should be left */
+ if (s_parsed && c_parsed && h_parsed)
+ return -EINVAL;
+
+ type = *next;
+ switch (type) {
+ case 's':
+ case 'S':
+ if (s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+ s_parsed = 1;
+ next++;
+ break;
+ case 'c':
+ case 'C':
+ if (c_parsed || h_parsed)
+ return -EINVAL;
+ c_parsed = 1;
+ next++;
+ break;
+ case 'h':
+ case 'H':
+ if (h_parsed)
+ return -EINVAL;
+ h_parsed = 1;
+ next++;
+ break;
+ default:
+ /* If it starts with a digit, it must be the core id alone. */
+ if (!isdigit(*next) || s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+
+ type = 'C';
+ }
+
+ for (num_len = 0; *next != '\0'; next++, num_len++) {
+ if (num_len == RTE_DIM(num) - 1) /* leave room for '\0' */
+ return -EINVAL;
+
+ if (!isdigit(*next))
+ break;
+
+ num[num_len] = *next;
+ }
+
+ if (num_len == 0 && type != 'h' && type != 'H')
+ return -EINVAL;
+
+ if (num_len != 0 && (type == 'h' || type == 'H'))
+ return -EINVAL;
+
+ num[num_len] = '\0';
+ val = strtol(num, NULL, 10);
+
+ h = 0;
+ switch (type) {
+ case 's':
+ case 'S':
+ s = val;
+ break;
+ case 'c':
+ case 'C':
+ c = val;
+ break;
+ case 'h':
+ case 'H':
+ h = 1;
+ break;
+ }
+ }
+
+ p->socket_id = s;
+ p->core_id = c;
+ p->thread_id = h;
+ return 0;
+}
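+
+/*
+ * Examples for the syntax accepted above: "2" sets core_id = 2 only;
+ * "s1c4" sets socket_id = 1 and core_id = 4; "s0c2h" additionally sets
+ * thread_id = 1 to select the hyper-thread.
+ */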
diff --git a/drivers/net/softnic/parser.h b/drivers/net/softnic/parser.h
new file mode 100644
index 00000000..1ee3f82a
--- /dev/null
+++ b/drivers/net/softnic/parser.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#ifndef __INCLUDE_SOFTNIC_PARSER_H__
+#define __INCLUDE_SOFTNIC_PARSER_H__
+
+#include <stdint.h>
+#include <ctype.h>
+
+#include <rte_ip.h>
+#include <rte_ether.h>
+
+#define PARSE_DELIMITER " \f\n\r\t\v"
+
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++) \
+ ; \
+ _p; \
+})
+
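+/*
+ * skip_white_spaces() is a statement-expression macro: it evaluates to a
+ * pointer past any leading isspace() characters without modifying its
+ * argument. For example:
+ *
+ *	const char *s = "  \t42";
+ *	const char *p = skip_white_spaces(s);	// p points at "42"
+ */
+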
+static inline size_t
+skip_digits(const char *src)
+{
+ size_t i;
+
+ for (i = 0; isdigit(src[i]); i++)
+ ;
+
+ return i;
+}
+
+int softnic_parser_read_arg_bool(const char *p);
+
+int softnic_parser_read_int32(int32_t *value, const char *p);
+
+int softnic_parser_read_uint64(uint64_t *value, const char *p);
+int softnic_parser_read_uint32(uint32_t *value, const char *p);
+int softnic_parser_read_uint16(uint16_t *value, const char *p);
+int softnic_parser_read_uint8(uint8_t *value, const char *p);
+
+int softnic_parser_read_uint64_hex(uint64_t *value, const char *p);
+int softnic_parser_read_uint32_hex(uint32_t *value, const char *p);
+int softnic_parser_read_uint16_hex(uint16_t *value, const char *p);
+int softnic_parser_read_uint8_hex(uint8_t *value, const char *p);
+
+int softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+
+int softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4);
+int softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6);
+int softnic_parse_mac_addr(const char *token, struct ether_addr *addr);
+int softnic_parse_mpls_labels(char *string,
+ uint32_t *labels, uint32_t *n_labels);
+
+struct softnic_cpu_core_params {
+ uint32_t socket_id;
+ uint32_t core_id;
+ uint32_t thread_id;
+};
+
+int softnic_parse_cpu_core(const char *entry,
+ struct softnic_cpu_core_params *p);
+
+int softnic_parse_tokenize_string(char *string,
+ char *tokens[], uint32_t *n_tokens);
+
+#endif
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index 6b3c13e5..30fb3952 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -13,43 +13,51 @@
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_ring.h>
-#include <rte_sched.h>
#include <rte_tm_driver.h>
#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"
-#define DEV_HARD(p) \
- (&rte_eth_devices[p->hard.port_id])
-
-#define PMD_PARAM_SOFT_TM "soft_tm"
-#define PMD_PARAM_SOFT_TM_RATE "soft_tm_rate"
-#define PMD_PARAM_SOFT_TM_NB_QUEUES "soft_tm_nb_queues"
-#define PMD_PARAM_SOFT_TM_QSIZE0 "soft_tm_qsize0"
-#define PMD_PARAM_SOFT_TM_QSIZE1 "soft_tm_qsize1"
-#define PMD_PARAM_SOFT_TM_QSIZE2 "soft_tm_qsize2"
-#define PMD_PARAM_SOFT_TM_QSIZE3 "soft_tm_qsize3"
-#define PMD_PARAM_SOFT_TM_ENQ_BSZ "soft_tm_enq_bsz"
-#define PMD_PARAM_SOFT_TM_DEQ_BSZ "soft_tm_deq_bsz"
-
-#define PMD_PARAM_HARD_NAME "hard_name"
-#define PMD_PARAM_HARD_TX_QUEUE_ID "hard_tx_queue_id"
+#define PMD_PARAM_FIRMWARE "firmware"
+#define PMD_PARAM_CONN_PORT "conn_port"
+#define PMD_PARAM_CPU_ID "cpu_id"
+#define PMD_PARAM_TM_N_QUEUES "tm_n_queues"
+#define PMD_PARAM_TM_QSIZE0 "tm_qsize0"
+#define PMD_PARAM_TM_QSIZE1 "tm_qsize1"
+#define PMD_PARAM_TM_QSIZE2 "tm_qsize2"
+#define PMD_PARAM_TM_QSIZE3 "tm_qsize3"
static const char *pmd_valid_args[] = {
- PMD_PARAM_SOFT_TM,
- PMD_PARAM_SOFT_TM_RATE,
- PMD_PARAM_SOFT_TM_NB_QUEUES,
- PMD_PARAM_SOFT_TM_QSIZE0,
- PMD_PARAM_SOFT_TM_QSIZE1,
- PMD_PARAM_SOFT_TM_QSIZE2,
- PMD_PARAM_SOFT_TM_QSIZE3,
- PMD_PARAM_SOFT_TM_ENQ_BSZ,
- PMD_PARAM_SOFT_TM_DEQ_BSZ,
- PMD_PARAM_HARD_NAME,
- PMD_PARAM_HARD_TX_QUEUE_ID,
+ PMD_PARAM_FIRMWARE,
+ PMD_PARAM_CONN_PORT,
+ PMD_PARAM_CPU_ID,
+ PMD_PARAM_TM_N_QUEUES,
+ PMD_PARAM_TM_QSIZE0,
+ PMD_PARAM_TM_QSIZE1,
+ PMD_PARAM_TM_QSIZE2,
+ PMD_PARAM_TM_QSIZE3,
NULL
};
+static const char welcome[] =
+ "\n"
+ "Welcome to Soft NIC!\n"
+ "\n";
+
+static const char prompt[] = "softnic> ";
+
+struct softnic_conn_params conn_params_default = {
+ .welcome = welcome,
+ .prompt = prompt,
+ .addr = "0.0.0.0",
+ .port = 0,
+ .buf_size = 1024 * 1024,
+ .msg_in_len_max = 1024,
+ .msg_out_len_max = 1024 * 1024,
+ .msg_handle = softnic_cli_process,
+ .msg_handle_arg = NULL,
+};
+
static const struct rte_eth_dev_info pmd_dev_info = {
.min_rx_bufsize = 0,
.max_rx_pktlen = UINT32_MAX,
@@ -65,6 +73,7 @@ static const struct rte_eth_dev_info pmd_dev_info = {
.nb_min = 0,
.nb_align = 1,
},
+ .rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP,
};
static int pmd_softnic_logtype;
@@ -81,50 +90,36 @@ pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
}
static int
-pmd_dev_configure(struct rte_eth_dev *dev)
+pmd_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *p = dev->data->dev_private;
- struct rte_eth_dev *hard_dev = DEV_HARD(p);
-
- if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
- return -1;
-
- if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
- return -1;
-
return 0;
}
static int
pmd_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t rx_queue_id,
- uint16_t nb_rx_desc __rte_unused,
- unsigned int socket_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id __rte_unused,
const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mb_pool __rte_unused)
{
+ char name[NAME_SIZE];
struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_swq *swq;
- if (p->params.soft.intrusive == 0) {
- struct pmd_rx_queue *rxq;
-
- rxq = rte_zmalloc_socket(p->params.soft.name,
- sizeof(struct pmd_rx_queue), 0, socket_id);
- if (rxq == NULL)
- return -ENOMEM;
+ struct softnic_swq_params params = {
+ .size = nb_rx_desc,
+ };
- rxq->hard.port_id = p->hard.port_id;
- rxq->hard.rx_queue_id = rx_queue_id;
- dev->data->rx_queues[rx_queue_id] = rxq;
- } else {
- struct rte_eth_dev *hard_dev = DEV_HARD(p);
- void *rxq = hard_dev->data->rx_queues[rx_queue_id];
+ snprintf(name, sizeof(name), "RXQ%u", rx_queue_id);
- if (rxq == NULL)
- return -1;
+ swq = softnic_swq_create(p,
+ name,
+ &params);
+ if (swq == NULL)
+ return -1;
- dev->data->rx_queues[rx_queue_id] = rxq;
- }
+ dev->data->rx_queues[rx_queue_id] = swq->r;
return 0;
}
@@ -132,21 +127,26 @@ static int
pmd_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_queue_id,
uint16_t nb_tx_desc,
- unsigned int socket_id,
+ unsigned int socket_id __rte_unused,
const struct rte_eth_txconf *tx_conf __rte_unused)
{
- uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4;
- char name[size];
- struct rte_ring *r;
-
- snprintf(name, sizeof(name), "%s_txq%04x",
- dev->data->name, tx_queue_id);
- r = rte_ring_create(name, nb_tx_desc, socket_id,
- RING_F_SP_ENQ | RING_F_SC_DEQ);
- if (r == NULL)
+ char name[NAME_SIZE];
+ struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_swq *swq;
+
+ struct softnic_swq_params params = {
+ .size = nb_tx_desc,
+ };
+
+ snprintf(name, sizeof(name), "TXQ%u", tx_queue_id);
+
+ swq = softnic_swq_create(p,
+ name,
+ &params);
+ if (swq == NULL)
return -1;
- dev->data->tx_queues[tx_queue_id] = r;
+ dev->data->tx_queues[tx_queue_id] = swq->r;
return 0;
}
@@ -154,23 +154,19 @@ static int
pmd_dev_start(struct rte_eth_dev *dev)
{
struct pmd_internals *p = dev->data->dev_private;
+ int status;
- if (tm_used(dev)) {
- int status = tm_start(p);
-
- if (status)
- return status;
- }
+ /* Firmware */
+ status = softnic_cli_script_process(p,
+ p->params.firmware,
+ conn_params_default.msg_in_len_max,
+ conn_params_default.msg_out_len_max);
+ if (status)
+ return status;
+ /* Link UP */
dev->data->dev_link.link_status = ETH_LINK_UP;
- if (p->params.soft.intrusive) {
- struct rte_eth_dev *hard_dev = DEV_HARD(p);
-
- /* The hard_dev->rx_pkt_burst should be stable by now */
- dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
- }
-
return 0;
}
@@ -179,20 +175,27 @@ pmd_dev_stop(struct rte_eth_dev *dev)
{
struct pmd_internals *p = dev->data->dev_private;
+ /* Link DOWN */
dev->data->dev_link.link_status = ETH_LINK_DOWN;
- if (tm_used(dev))
- tm_stop(p);
+ /* Free up the resources created by the firmware script */
+ softnic_pipeline_disable_all(p);
+ softnic_pipeline_free(p);
+ softnic_table_action_profile_free(p);
+ softnic_port_in_action_profile_free(p);
+ softnic_tap_free(p);
+ softnic_tmgr_free(p);
+ softnic_link_free(p);
+ softnic_softnic_swq_free_keep_rxq_txq(p);
+ softnic_mempool_free(p);
+
+ tm_hierarchy_free(p);
}
static void
-pmd_dev_close(struct rte_eth_dev *dev)
+pmd_dev_close(struct rte_eth_dev *dev __rte_unused)
{
- uint32_t i;
-
- /* TX queues */
- for (i = 0; i < dev->data->nb_tx_queues; i++)
- rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
+ return;
}
static int
@@ -203,10 +206,9 @@ pmd_link_update(struct rte_eth_dev *dev __rte_unused,
}
static int
-pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
+pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
{
- *(const struct rte_tm_ops **)arg =
- (tm_enabled(dev)) ? &pmd_tm_ops : NULL;
+ *(const struct rte_tm_ops **)arg = &pmd_tm_ops;
return 0;
}
@@ -228,12 +230,10 @@ pmd_rx_pkt_burst(void *rxq,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct pmd_rx_queue *rx_queue = rxq;
-
- return rte_eth_rx_burst(rx_queue->hard.port_id,
- rx_queue->hard.rx_queue_id,
- rx_pkts,
- nb_pkts);
+ return (uint16_t)rte_ring_sc_dequeue_burst(rxq,
+ (void **)rx_pkts,
+ nb_pkts,
+ NULL);
}
static uint16_t
@@ -241,239 +241,56 @@ pmd_tx_pkt_burst(void *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- return (uint16_t)rte_ring_enqueue_burst(txq,
+ return (uint16_t)rte_ring_sp_enqueue_burst(txq,
(void **)tx_pkts,
nb_pkts,
NULL);
}
-static __rte_always_inline int
-run_default(struct rte_eth_dev *dev)
-{
- struct pmd_internals *p = dev->data->dev_private;
-
- /* Persistent context: Read Only (update not required) */
- struct rte_mbuf **pkts = p->soft.def.pkts;
- uint16_t nb_tx_queues = dev->data->nb_tx_queues;
-
- /* Persistent context: Read - Write (update required) */
- uint32_t txq_pos = p->soft.def.txq_pos;
- uint32_t pkts_len = p->soft.def.pkts_len;
- uint32_t flush_count = p->soft.def.flush_count;
-
- /* Not part of the persistent context */
- uint32_t pos;
- uint16_t i;
-
- /* Soft device TXQ read, Hard device TXQ write */
- for (i = 0; i < nb_tx_queues; i++) {
- struct rte_ring *txq = dev->data->tx_queues[txq_pos];
-
- /* Read soft device TXQ burst to packet enqueue buffer */
- pkts_len += rte_ring_sc_dequeue_burst(txq,
- (void **)&pkts[pkts_len],
- DEFAULT_BURST_SIZE,
- NULL);
-
- /* Increment soft device TXQ */
- txq_pos++;
- if (txq_pos >= nb_tx_queues)
- txq_pos = 0;
-
- /* Hard device TXQ write when complete burst is available */
- if (pkts_len >= DEFAULT_BURST_SIZE) {
- for (pos = 0; pos < pkts_len; )
- pos += rte_eth_tx_burst(p->hard.port_id,
- p->params.hard.tx_queue_id,
- &pkts[pos],
- (uint16_t)(pkts_len - pos));
-
- pkts_len = 0;
- flush_count = 0;
- break;
- }
- }
-
- if (flush_count >= FLUSH_COUNT_THRESHOLD) {
- for (pos = 0; pos < pkts_len; )
- pos += rte_eth_tx_burst(p->hard.port_id,
- p->params.hard.tx_queue_id,
- &pkts[pos],
- (uint16_t)(pkts_len - pos));
-
- pkts_len = 0;
- flush_count = 0;
- }
-
- p->soft.def.txq_pos = txq_pos;
- p->soft.def.pkts_len = pkts_len;
- p->soft.def.flush_count = flush_count + 1;
-
- return 0;
-}
-
-static __rte_always_inline int
-run_tm(struct rte_eth_dev *dev)
-{
- struct pmd_internals *p = dev->data->dev_private;
-
- /* Persistent context: Read Only (update not required) */
- struct rte_sched_port *sched = p->soft.tm.sched;
- struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
- struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
- uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
- uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
- uint16_t nb_tx_queues = dev->data->nb_tx_queues;
-
- /* Persistent context: Read - Write (update required) */
- uint32_t txq_pos = p->soft.tm.txq_pos;
- uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
- uint32_t flush_count = p->soft.tm.flush_count;
-
- /* Not part of the persistent context */
- uint32_t pkts_deq_len, pos;
- uint16_t i;
-
- /* Soft device TXQ read, TM enqueue */
- for (i = 0; i < nb_tx_queues; i++) {
- struct rte_ring *txq = dev->data->tx_queues[txq_pos];
-
- /* Read TXQ burst to packet enqueue buffer */
- pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
- (void **)&pkts_enq[pkts_enq_len],
- enq_bsz,
- NULL);
-
- /* Increment TXQ */
- txq_pos++;
- if (txq_pos >= nb_tx_queues)
- txq_pos = 0;
-
- /* TM enqueue when complete burst is available */
- if (pkts_enq_len >= enq_bsz) {
- rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
-
- pkts_enq_len = 0;
- flush_count = 0;
- break;
- }
- }
-
- if (flush_count >= FLUSH_COUNT_THRESHOLD) {
- if (pkts_enq_len)
- rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
-
- pkts_enq_len = 0;
- flush_count = 0;
- }
-
- p->soft.tm.txq_pos = txq_pos;
- p->soft.tm.pkts_enq_len = pkts_enq_len;
- p->soft.tm.flush_count = flush_count + 1;
-
- /* TM dequeue, Hard device TXQ write */
- pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);
-
- for (pos = 0; pos < pkts_deq_len; )
- pos += rte_eth_tx_burst(p->hard.port_id,
- p->params.hard.tx_queue_id,
- &pkts_deq[pos],
- (uint16_t)(pkts_deq_len - pos));
-
- return 0;
-}
-
-int
-rte_pmd_softnic_run(uint16_t port_id)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
-#endif
-
- return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
-}
-
-static struct ether_addr eth_addr = { .addr_bytes = {0} };
-
-static uint32_t
-eth_dev_speed_max_mbps(uint32_t speed_capa)
-{
- uint32_t rate_mbps[32] = {
- ETH_SPEED_NUM_NONE,
- ETH_SPEED_NUM_10M,
- ETH_SPEED_NUM_10M,
- ETH_SPEED_NUM_100M,
- ETH_SPEED_NUM_100M,
- ETH_SPEED_NUM_1G,
- ETH_SPEED_NUM_2_5G,
- ETH_SPEED_NUM_5G,
- ETH_SPEED_NUM_10G,
- ETH_SPEED_NUM_20G,
- ETH_SPEED_NUM_25G,
- ETH_SPEED_NUM_40G,
- ETH_SPEED_NUM_50G,
- ETH_SPEED_NUM_56G,
- ETH_SPEED_NUM_100G,
- };
-
- uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;
- return rate_mbps[pos];
-}
-
-static int
-default_init(struct pmd_internals *p,
- struct pmd_params *params,
- int numa_node)
-{
- p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
- 2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
- 0,
- numa_node);
-
- if (p->soft.def.pkts == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-static void
-default_free(struct pmd_internals *p)
-{
- rte_free(p->soft.def.pkts);
-}
-
static void *
-pmd_init(struct pmd_params *params, int numa_node)
+pmd_init(struct pmd_params *params)
{
struct pmd_internals *p;
int status;
- p = rte_zmalloc_socket(params->soft.name,
+ p = rte_zmalloc_socket(params->name,
sizeof(struct pmd_internals),
0,
- numa_node);
+ params->cpu_id);
if (p == NULL)
return NULL;
+ /* Params */
memcpy(&p->params, params, sizeof(p->params));
- rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);
- /* Default */
- status = default_init(p, params, numa_node);
+ /* Resources */
+ tm_hierarchy_init(p);
+
+ softnic_mempool_init(p);
+ softnic_swq_init(p);
+ softnic_link_init(p);
+ softnic_tmgr_init(p);
+ softnic_tap_init(p);
+ softnic_port_in_action_profile_init(p);
+ softnic_table_action_profile_init(p);
+ softnic_pipeline_init(p);
+
+ status = softnic_thread_init(p);
if (status) {
- free(p->params.hard.name);
rte_free(p);
return NULL;
}
- /* Traffic Management (TM)*/
- if (params->soft.flags & PMD_FEATURE_TM) {
- status = tm_init(p, params, numa_node);
- if (status) {
- default_free(p);
- free(p->params.hard.name);
+ if (params->conn_port) {
+ struct softnic_conn_params conn_params;
+
+ memcpy(&conn_params, &conn_params_default, sizeof(conn_params));
+ conn_params.port = p->params.conn_port;
+ conn_params.msg_handle_arg = p;
+
+ p->conn = softnic_conn_init(&conn_params);
+ if (p->conn == NULL) {
+ softnic_thread_free(p);
rte_free(p);
return NULL;
}
@@ -485,57 +302,62 @@ pmd_init(struct pmd_params *params, int numa_node)
static void
pmd_free(struct pmd_internals *p)
{
- if (p->params.soft.flags & PMD_FEATURE_TM)
- tm_free(p);
+ if (p == NULL)
+ return;
- default_free(p);
+ if (p->params.conn_port)
+ softnic_conn_free(p->conn);
+
+ softnic_thread_free(p);
+ softnic_pipeline_free(p);
+ softnic_table_action_profile_free(p);
+ softnic_port_in_action_profile_free(p);
+ softnic_tap_free(p);
+ softnic_tmgr_free(p);
+ softnic_link_free(p);
+ softnic_swq_free(p);
+ softnic_mempool_free(p);
+
+ tm_hierarchy_free(p);
- free(p->params.hard.name);
rte_free(p);
}
+static struct ether_addr eth_addr = {
+ .addr_bytes = {0},
+};
+
static int
pmd_ethdev_register(struct rte_vdev_device *vdev,
struct pmd_params *params,
void *dev_private)
{
- struct rte_eth_dev_info hard_info;
- struct rte_eth_dev *soft_dev;
- uint32_t hard_speed;
- int numa_node;
- uint16_t hard_port_id;
-
- rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
- rte_eth_dev_info_get(hard_port_id, &hard_info);
- hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
- numa_node = rte_eth_dev_socket_id(hard_port_id);
+ struct rte_eth_dev *dev;
/* Ethdev entry allocation */
- soft_dev = rte_eth_dev_allocate(params->soft.name);
- if (!soft_dev)
+ dev = rte_eth_dev_allocate(params->name);
+ if (!dev)
return -ENOMEM;
/* dev */
- soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
- NULL : /* set up later */
- pmd_rx_pkt_burst;
- soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
- soft_dev->tx_pkt_prepare = NULL;
- soft_dev->dev_ops = &pmd_ops;
- soft_dev->device = &vdev->device;
+ dev->rx_pkt_burst = pmd_rx_pkt_burst;
+ dev->tx_pkt_burst = pmd_tx_pkt_burst;
+ dev->tx_pkt_prepare = NULL;
+ dev->dev_ops = &pmd_ops;
+ dev->device = &vdev->device;
/* dev->data */
- soft_dev->data->dev_private = dev_private;
- soft_dev->data->dev_link.link_speed = hard_speed;
- soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- soft_dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
- soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
- soft_dev->data->mac_addrs = &eth_addr;
- soft_dev->data->promiscuous = 1;
- soft_dev->data->kdrv = RTE_KDRV_NONE;
- soft_dev->data->numa_node = numa_node;
-
- rte_eth_dev_probing_finish(soft_dev);
+ dev->data->dev_private = dev_private;
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
+ dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->mac_addrs = &eth_addr;
+ dev->data->promiscuous = 1;
+ dev->data->kdrv = RTE_KDRV_NONE;
+ dev->data->numa_node = params->cpu_id;
+
+ rte_eth_dev_probing_finish(dev);
return 0;
}
@@ -566,10 +388,21 @@ get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
}
static int
-pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
+get_uint16(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint16_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+
+static int
+pmd_parse_args(struct pmd_params *p, const char *params)
{
struct rte_kvargs *kvlist;
- int i, ret;
+ int ret = 0;
kvlist = rte_kvargs_parse(params, pmd_valid_args);
if (kvlist == NULL)
@@ -577,141 +410,71 @@ pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
/* Set default values */
memset(p, 0, sizeof(*p));
- p->soft.name = name;
- p->soft.intrusive = INTRUSIVE;
- p->soft.tm.rate = 0;
- p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
- p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
- p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
- p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
- p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;
-
- /* SOFT: TM (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
- char *s;
-
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
- &get_string, &s);
+ p->firmware = SOFTNIC_FIRMWARE;
+ p->cpu_id = SOFTNIC_CPU_ID;
+ p->tm.n_queues = SOFTNIC_TM_N_QUEUES;
+ p->tm.qsize[0] = SOFTNIC_TM_QUEUE_SIZE;
+ p->tm.qsize[1] = SOFTNIC_TM_QUEUE_SIZE;
+ p->tm.qsize[2] = SOFTNIC_TM_QUEUE_SIZE;
+ p->tm.qsize[3] = SOFTNIC_TM_QUEUE_SIZE;
+
+ /* Firmware script (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_FIRMWARE,
+ &get_string, &p->firmware);
if (ret < 0)
goto out_free;
-
- if (strcmp(s, "on") == 0)
- p->soft.flags |= PMD_FEATURE_TM;
- else if (strcmp(s, "off") == 0)
- p->soft.flags &= ~PMD_FEATURE_TM;
- else
- ret = -EINVAL;
-
- free(s);
- if (ret)
- goto out_free;
}
- /* SOFT: TM rate (measured in bytes/second) (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
- &get_uint32, &p->soft.tm.rate);
+ /* Connection listening port (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_CONN_PORT) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_CONN_PORT,
+ &get_uint16, &p->conn_port);
if (ret < 0)
goto out_free;
-
- p->soft.flags |= PMD_FEATURE_TM;
}
- /* SOFT: TM number of queues (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
- &get_uint32, &p->soft.tm.nb_queues);
+ /* CPU ID (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_CPU_ID) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_CPU_ID,
+ &get_uint32, &p->cpu_id);
if (ret < 0)
goto out_free;
-
- p->soft.flags |= PMD_FEATURE_TM;
}
- /* SOFT: TM queue size 0 .. 3 (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
- uint32_t qsize;
-
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
- &get_uint32, &qsize);
+ /* TM number of queues (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_N_QUEUES) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_N_QUEUES,
+ &get_uint32, &p->tm.n_queues);
if (ret < 0)
goto out_free;
-
- p->soft.tm.qsize[0] = (uint16_t)qsize;
- p->soft.flags |= PMD_FEATURE_TM;
}
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
- uint32_t qsize;
-
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
- &get_uint32, &qsize);
+ /* TM queue size 0 .. 3 (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE0) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE0,
+ &get_uint32, &p->tm.qsize[0]);
if (ret < 0)
goto out_free;
-
- p->soft.tm.qsize[1] = (uint16_t)qsize;
- p->soft.flags |= PMD_FEATURE_TM;
}
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
- uint32_t qsize;
-
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
- &get_uint32, &qsize);
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE1) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE1,
+ &get_uint32, &p->tm.qsize[1]);
if (ret < 0)
goto out_free;
-
- p->soft.tm.qsize[2] = (uint16_t)qsize;
- p->soft.flags |= PMD_FEATURE_TM;
}
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
- uint32_t qsize;
-
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
- &get_uint32, &qsize);
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE2) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE2,
+ &get_uint32, &p->tm.qsize[2]);
if (ret < 0)
goto out_free;
-
- p->soft.tm.qsize[3] = (uint16_t)qsize;
- p->soft.flags |= PMD_FEATURE_TM;
}
- /* SOFT: TM enqueue burst size (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
- &get_uint32, &p->soft.tm.enq_bsz);
- if (ret < 0)
- goto out_free;
-
- p->soft.flags |= PMD_FEATURE_TM;
- }
-
- /* SOFT: TM dequeue burst size (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
- &get_uint32, &p->soft.tm.deq_bsz);
- if (ret < 0)
- goto out_free;
-
- p->soft.flags |= PMD_FEATURE_TM;
- }
-
- /* HARD: name (mandatory) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
- &get_string, &p->hard.name);
- if (ret < 0)
- goto out_free;
- } else {
- ret = -EINVAL;
- goto out_free;
- }
-
- /* HARD: tx_queue_id (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
- &get_uint32, &p->hard.tx_queue_id);
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE3) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE3,
+ &get_uint32, &p->tm.qsize[3]);
if (ret < 0)
goto out_free;
}
@@ -726,68 +489,31 @@ pmd_probe(struct rte_vdev_device *vdev)
{
struct pmd_params p;
const char *params;
- int status;
+ int status = 0;
- struct rte_eth_dev_info hard_info;
- uint32_t hard_speed;
- uint16_t hard_port_id;
- int numa_node;
void *dev_private;
- struct rte_eth_dev *eth_dev;
const char *name = rte_vdev_device_name(vdev);
PMD_LOG(INFO, "Probing device \"%s\"", name);
/* Parse input arguments */
params = rte_vdev_device_args(vdev);
-
- if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
- strlen(params) == 0) {
- eth_dev = rte_eth_dev_attach_secondary(name);
- if (!eth_dev) {
- PMD_LOG(ERR, "Failed to probe %s", name);
- return -1;
- }
- /* TODO: request info from primary to set up Rx and Tx */
- eth_dev->dev_ops = &pmd_ops;
- rte_eth_dev_probing_finish(eth_dev);
- return 0;
- }
-
if (!params)
return -EINVAL;
- status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
+ status = pmd_parse_args(&p, params);
if (status)
return status;
- /* Check input arguments */
- if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id))
- return -EINVAL;
-
- rte_eth_dev_info_get(hard_port_id, &hard_info);
- hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
- numa_node = rte_eth_dev_socket_id(hard_port_id);
-
- if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
- return -EINVAL;
-
- if (p.soft.flags & PMD_FEATURE_TM) {
- status = tm_params_check(&p, hard_speed);
-
- if (status)
- return status;
- }
+ p.name = name;
/* Allocate and initialize soft ethdev private data */
- dev_private = pmd_init(&p, numa_node);
+ dev_private = pmd_init(&p);
if (dev_private == NULL)
return -ENOMEM;
/* Register soft ethdev */
- PMD_LOG(INFO,
- "Creating soft ethdev \"%s\" for hard ethdev \"%s\"",
- p.soft.name, p.hard.name);
+ PMD_LOG(INFO, "Creating soft ethdev \"%s\"", p.name);
status = pmd_ethdev_register(vdev, &p, dev_private);
if (status) {
@@ -807,8 +533,7 @@ pmd_remove(struct rte_vdev_device *vdev)
if (!vdev)
return -EINVAL;
- PMD_LOG(INFO, "Removing device \"%s\"",
- rte_vdev_device_name(vdev));
+ PMD_LOG(INFO, "Removing device \"%s\"", rte_vdev_device_name(vdev));
/* Find the ethdev entry */
dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
@@ -817,9 +542,9 @@ pmd_remove(struct rte_vdev_device *vdev)
p = dev->data->dev_private;
/* Free device data structures*/
- pmd_free(p);
rte_free(dev->data);
rte_eth_dev_release_port(dev);
+ pmd_free(p);
return 0;
}
@@ -831,23 +556,39 @@ static struct rte_vdev_driver pmd_softnic_drv = {
RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
- PMD_PARAM_SOFT_TM "=on|off "
- PMD_PARAM_SOFT_TM_RATE "=<int> "
- PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
- PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
- PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
- PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
- PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
- PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
- PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
- PMD_PARAM_HARD_NAME "=<string> "
- PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
-
-RTE_INIT(pmd_softnic_init_log);
-static void
-pmd_softnic_init_log(void)
+ PMD_PARAM_FIRMWARE "=<string> "
+ PMD_PARAM_CONN_PORT "=<uint16> "
+ PMD_PARAM_CPU_ID "=<uint32> "
+ PMD_PARAM_TM_N_QUEUES "=<uint32> "
+ PMD_PARAM_TM_QSIZE0 "=<uint32> "
+ PMD_PARAM_TM_QSIZE1 "=<uint32> "
+ PMD_PARAM_TM_QSIZE2 "=<uint32> "
+ PMD_PARAM_TM_QSIZE3 "=<uint32>"
+);
+
+RTE_INIT(pmd_softnic_init_log)
{
pmd_softnic_logtype = rte_log_register("pmd.net.softnic");
if (pmd_softnic_logtype >= 0)
rte_log_set_level(pmd_softnic_logtype, RTE_LOG_NOTICE);
}
+
+int
+rte_pmd_softnic_manage(uint16_t port_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct pmd_internals *softnic;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+#endif
+
+ softnic = dev->data->dev_private;
+
+ softnic_conn_poll_for_conn(softnic->conn);
+
+ softnic_conn_poll_for_msg(softnic->conn);
+
+ return 0;
+}
diff --git a/drivers/net/softnic/rte_eth_softnic.h b/drivers/net/softnic/rte_eth_softnic.h
index 9a2c7ba9..048dfe6b 100644
--- a/drivers/net/softnic/rte_eth_softnic.h
+++ b/drivers/net/softnic/rte_eth_softnic.h
@@ -11,42 +11,53 @@
extern "C" {
#endif
-#ifndef SOFTNIC_SOFT_TM_NB_QUEUES
-#define SOFTNIC_SOFT_TM_NB_QUEUES 65536
+/** Firmware. */
+#ifndef SOFTNIC_FIRMWARE
+#define SOFTNIC_FIRMWARE "firmware.cli"
#endif
-#ifndef SOFTNIC_SOFT_TM_QUEUE_SIZE
-#define SOFTNIC_SOFT_TM_QUEUE_SIZE 64
+/** TCP connection port (0 = no connectivity). */
+#ifndef SOFTNIC_CONN_PORT
+#define SOFTNIC_CONN_PORT 0
#endif
-#ifndef SOFTNIC_SOFT_TM_ENQ_BSZ
-#define SOFTNIC_SOFT_TM_ENQ_BSZ 32
+/** NUMA node ID. */
+#ifndef SOFTNIC_CPU_ID
+#define SOFTNIC_CPU_ID 0
#endif
-#ifndef SOFTNIC_SOFT_TM_DEQ_BSZ
-#define SOFTNIC_SOFT_TM_DEQ_BSZ 24
+/** Traffic Manager: Number of scheduler queues. */
+#ifndef SOFTNIC_TM_N_QUEUES
+#define SOFTNIC_TM_N_QUEUES (64 * 1024)
#endif
-#ifndef SOFTNIC_HARD_TX_QUEUE_ID
-#define SOFTNIC_HARD_TX_QUEUE_ID 0
+/** Traffic Manager: Scheduler queue size (per traffic class). */
+#ifndef SOFTNIC_TM_QUEUE_SIZE
+#define SOFTNIC_TM_QUEUE_SIZE 64
#endif
/**
- * Run the traffic management function on the softnic device
+ * Soft NIC run.
*
- * This function read the packets from the softnic input queues, insert into
- * QoS scheduler queues based on mbuf sched field value and transmit the
- * scheduled packets out through the hard device interface.
- *
- * @param portid
- * port id of the soft device.
+ * @param port_id
+ * Port ID of the Soft NIC device.
* @return
- * zero.
+ * Zero on success, error code otherwise.
*/
-
int
rte_pmd_softnic_run(uint16_t port_id);
+/**
+ * Soft NIC manage.
+ *
+ * @param port_id
+ * Port ID of the Soft NIC device.
+ * @return
+ * Zero on success, error code otherwise.
+ */
+int __rte_experimental
+rte_pmd_softnic_manage(uint16_t port_id);
+
#ifdef __cplusplus
}
#endif
diff --git a/drivers/net/softnic/rte_eth_softnic_action.c b/drivers/net/softnic/rte_eth_softnic_action.c
new file mode 100644
index 00000000..c25f4dd9
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_action.c
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_string_fns.h>
+
+#include "hash_func.h"
+#include "rte_eth_softnic_internals.h"
+
+/**
+ * Input port
+ */
+int
+softnic_port_in_action_profile_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->port_in_action_profile_list);
+
+ return 0;
+}
+
+void
+softnic_port_in_action_profile_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_port_in_action_profile *profile;
+
+ profile = TAILQ_FIRST(&p->port_in_action_profile_list);
+ if (profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->port_in_action_profile_list, profile, node);
+ free(profile);
+ }
+}
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_port_in_action_profile *profile;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(profile, &p->port_in_action_profile_list, node)
+ if (strcmp(profile->name, name) == 0)
+ return profile;
+
+ return NULL;
+}
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_port_in_action_profile_params *params)
+{
+ struct softnic_port_in_action_profile *profile;
+ struct rte_port_in_action_profile *ap;
+ int status;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_port_in_action_profile_find(p, name) ||
+ params == NULL)
+ return NULL;
+
+ if ((params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) &&
+ params->lb.f_hash == NULL) {
+ switch (params->lb.key_size) {
+ case 8:
+ params->lb.f_hash = hash_default_key8;
+ break;
+
+ case 16:
+ params->lb.f_hash = hash_default_key16;
+ break;
+
+ case 24:
+ params->lb.f_hash = hash_default_key24;
+ break;
+
+ case 32:
+ params->lb.f_hash = hash_default_key32;
+ break;
+
+ case 40:
+ params->lb.f_hash = hash_default_key40;
+ break;
+
+ case 48:
+ params->lb.f_hash = hash_default_key48;
+ break;
+
+ case 56:
+ params->lb.f_hash = hash_default_key56;
+ break;
+
+ case 64:
+ params->lb.f_hash = hash_default_key64;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ params->lb.seed = 0;
+ }
+
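+ /*
+ * Whenever the LB action is enabled, params->lb.f_hash is now set:
+ * either supplied by the caller or defaulted above to a CRC-based
+ * hash matching the key size (8..64 bytes).
+ */
+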
+ /* Resource */
+ ap = rte_port_in_action_profile_create(0);
+ if (ap == NULL)
+ return NULL;
+
+ if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_FLTR)) {
+ status = rte_port_in_action_profile_action_register(ap,
+ RTE_PORT_IN_ACTION_FLTR,
+ &params->fltr);
+
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) {
+ status = rte_port_in_action_profile_action_register(ap,
+ RTE_PORT_IN_ACTION_LB,
+ &params->lb);
+
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ status = rte_port_in_action_profile_freeze(ap);
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node allocation */
+ profile = calloc(1, sizeof(struct softnic_port_in_action_profile));
+ if (profile == NULL) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(profile->name, name, sizeof(profile->name));
+ memcpy(&profile->params, params, sizeof(*params));
+ profile->ap = ap;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->port_in_action_profile_list, profile, node);
+
+ return profile;
+}
+
+/**
+ * Table
+ */
+int
+softnic_table_action_profile_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->table_action_profile_list);
+
+ return 0;
+}
+
+void
+softnic_table_action_profile_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_table_action_profile *profile;
+
+ profile = TAILQ_FIRST(&p->table_action_profile_list);
+ if (profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->table_action_profile_list, profile, node);
+ free(profile);
+ }
+}
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_table_action_profile *profile;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(profile, &p->table_action_profile_list, node)
+ if (strcmp(profile->name, name) == 0)
+ return profile;
+
+ return NULL;
+}
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_table_action_profile_params *params)
+{
+ struct softnic_table_action_profile *profile;
+ struct rte_table_action_profile *ap;
+ int status;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_table_action_profile_find(p, name) ||
+ params == NULL ||
+ ((params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) == 0))
+ return NULL;
+
+ if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) &&
+ params->lb.f_hash == NULL) {
+ switch (params->lb.key_size) {
+ case 8:
+ params->lb.f_hash = hash_default_key8;
+ break;
+
+ case 16:
+ params->lb.f_hash = hash_default_key16;
+ break;
+
+ case 24:
+ params->lb.f_hash = hash_default_key24;
+ break;
+
+ case 32:
+ params->lb.f_hash = hash_default_key32;
+ break;
+
+ case 40:
+ params->lb.f_hash = hash_default_key40;
+ break;
+
+ case 48:
+ params->lb.f_hash = hash_default_key48;
+ break;
+
+ case 56:
+ params->lb.f_hash = hash_default_key56;
+ break;
+
+ case 64:
+ params->lb.f_hash = hash_default_key64;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ params->lb.seed = 0;
+ }
+
+ /* Resource */
+ ap = rte_table_action_profile_create(&params->common);
+ if (ap == NULL)
+ return NULL;
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_FWD,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_LB,
+ &params->lb);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_MTR,
+ &params->mtr);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TM,
+ &params->tm);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_ENCAP,
+ &params->encap);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_NAT,
+ &params->nat);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TTL,
+ &params->ttl);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_STATS,
+ &params->stats);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+	if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TIME,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ status = rte_table_action_profile_freeze(ap);
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node allocation */
+ profile = calloc(1, sizeof(struct softnic_table_action_profile));
+ if (profile == NULL) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(profile->name, name, sizeof(profile->name));
+ memcpy(&profile->params, params, sizeof(*params));
+ profile->ap = ap;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->table_action_profile_list, profile, node);
+
+ return profile;
+}
diff --git a/drivers/net/softnic/rte_eth_softnic_cli.c b/drivers/net/softnic/rte_eth_softnic_cli.c
new file mode 100644
index 00000000..0c7448cc
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_cli.c
@@ -0,0 +1,5259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include "rte_eth_softnic_internals.h"
+#include "parser.h"
+
+#ifndef CMD_MAX_TOKENS
+#define CMD_MAX_TOKENS 256
+#endif
+
+#define MSG_OUT_OF_MEMORY "Not enough memory.\n"
+#define MSG_CMD_UNKNOWN "Unknown command \"%s\".\n"
+#define MSG_CMD_UNIMPLEM "Command \"%s\" not implemented.\n"
+#define MSG_ARG_NOT_ENOUGH "Not enough arguments for command \"%s\".\n"
+#define MSG_ARG_TOO_MANY "Too many arguments for command \"%s\".\n"
+#define MSG_ARG_MISMATCH "Wrong number of arguments for command \"%s\".\n"
+#define MSG_ARG_NOT_FOUND "Argument \"%s\" not found.\n"
+#define MSG_ARG_INVALID "Invalid value for argument \"%s\".\n"
+#define MSG_FILE_ERR "Error in file \"%s\" at line %u.\n"
+#define MSG_FILE_NOT_ENOUGH "Not enough rules in file \"%s\".\n"
+#define MSG_CMD_FAIL "Command \"%s\" failed.\n"
+
+static int
+is_comment(char *in)
+{
+	if ((strlen(in) && strchr("!#%;", in[0])) ||
+ (strncmp(in, "//", 2) == 0) ||
+ (strncmp(in, "--", 2) == 0))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * mempool <mempool_name>
+ * buffer <buffer_size>
+ * pool <pool_size>
+ * cache <cache_size>
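+ *
+ * Illustrative invocation (object name and sizes are examples only,
+ * not values the command requires):
+ *    mempool MEMPOOL0 buffer 2304 pool 32768 cache 256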
+ */
+static void
+cmd_mempool(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_mempool_params p;
+ char *name;
+ struct softnic_mempool *mempool;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "buffer") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buffer");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.buffer_size, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "buffer_size");
+ return;
+ }
+
+ if (strcmp(tokens[4], "pool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pool");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.pool_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pool_size");
+ return;
+ }
+
+ if (strcmp(tokens[6], "cache") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cache");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.cache_size, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cache_size");
+ return;
+ }
+
+ mempool = softnic_mempool_create(softnic, name, &p);
+ if (mempool == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * link <link_name>
+ * dev <device_name> | port <port_id>
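+ *
+ * Illustrative invocations (link names, PCI address and port ID are
+ * examples only):
+ *    link LINK0 dev 0000:02:00.0
+ *    link LINK1 port 1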
+ */
+static void
+cmd_link(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_link_params p;
+ struct softnic_link *link;
+ char *name;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens != 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "dev") == 0) {
+ p.dev_name = tokens[3];
+ } else if (strcmp(tokens[2], "port") == 0) {
+ p.dev_name = NULL;
+
+ if (softnic_parser_read_uint16(&p.port_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dev or port");
+ return;
+ }
+
+ link = softnic_link_create(softnic, name, &p);
+ if (link == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * swq <swq_name>
+ * size <size>
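+ *
+ * Illustrative invocation (name and size are examples only):
+ *    swq SWQ0 size 4096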
+ */
+static void
+cmd_swq(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_swq_params p;
+ char *name;
+ struct softnic_swq *swq;
+
+ if (n_tokens != 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.size, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "size");
+ return;
+ }
+
+ swq = softnic_swq_create(softnic, name, &p);
+ if (swq == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr shaper profile
+ * id <profile_id>
+ * rate <tb_rate> size <tb_size>
+ * adj <packet_length_adjust>
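+ *
+ * Illustrative invocation (rate in bytes per second; all values are
+ * examples only):
+ *    tmgr shaper profile id 0 rate 125000000 size 1000000 adj 24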
+ */
+static void
+cmd_tmgr_shaper_profile(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_shaper_params sp;
+ struct rte_tm_error error;
+ uint32_t shaper_profile_id;
+ uint16_t port_id;
+ int status;
+
+ memset(&sp, 0, sizeof(struct rte_tm_shaper_params));
+
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[2], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (strcmp(tokens[3], "id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shaper_profile_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "rate") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rate");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&sp.peak.rate, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tb_rate");
+ return;
+ }
+
+ if (strcmp(tokens[7], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&sp.peak.size, tokens[8]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tb_size");
+ return;
+ }
+
+ if (strcmp(tokens[9], "adj") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "adj");
+ return;
+ }
+
+ if (softnic_parser_read_int32(&sp.pkt_length_adjust, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "packet_length_adjust");
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status)
+ return;
+
+ status = rte_tm_shaper_profile_add(port_id, shaper_profile_id, &sp, &error);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr shared shaper
+ * id <shared_shaper_id>
+ * profile <shaper_profile_id>
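+ *
+ * Illustrative invocation (IDs are examples only; the profile must have
+ * been created beforehand):
+ *    tmgr shared shaper id 0 profile 1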
+ */
+static void
+cmd_tmgr_shared_shaper(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_error error;
+ uint32_t shared_shaper_id, shaper_profile_id;
+ uint16_t port_id;
+ int status;
+
+ if (n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "shared") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared");
+ return;
+ }
+
+ if (strcmp(tokens[2], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[3], "id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shared_shaper_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shaper_profile_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id");
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status)
+ return;
+
+ status = rte_tm_shared_shaper_add_update(port_id,
+ shared_shaper_id,
+ shaper_profile_id,
+ &error);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr node
+ * id <node_id>
+ * parent <parent_node_id | none>
+ * priority <priority>
+ * weight <weight>
+ * [shaper profile <shaper_profile_id>]
+ * [shared shaper <shared_shaper_id>]
+ * [nonleaf sp <n_sp_priorities>]
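+ *
+ * Illustrative invocations (node/profile IDs and weights are examples
+ * only): the first creates a root node, the second a child under it:
+ *    tmgr node id 43 parent none priority 0 weight 1 shaper profile 0
+ *    tmgr node id 42 parent 43 priority 0 weight 1 shaper profile 1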
+ */
+static void
+cmd_tmgr_node(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_error error;
+ struct rte_tm_node_params np;
+ uint32_t node_id, parent_node_id, priority, weight, shared_shaper_id;
+ uint16_t port_id;
+ int status;
+
+ memset(&np, 0, sizeof(struct rte_tm_node_params));
+ np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+ np.nonleaf.n_sp_priorities = 1;
+
+ if (n_tokens < 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "node") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "node");
+ return;
+ }
+
+ if (strcmp(tokens[2], "id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&node_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "node_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "parent") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "parent");
+ return;
+ }
+
+ if (strcmp(tokens[5], "none") == 0)
+ parent_node_id = RTE_TM_NODE_ID_NULL;
+ else {
+ if (softnic_parser_read_uint32(&parent_node_id, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "parent_node_id");
+ return;
+ }
+ }
+
+ if (strcmp(tokens[6], "priority") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&priority, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "priority");
+ return;
+ }
+
+ if (strcmp(tokens[8], "weight") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&weight, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "weight");
+ return;
+ }
+
+ tokens += 10;
+ n_tokens -= 10;
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[0], "shaper") == 0) &&
+ (strcmp(tokens[1], "profile") == 0)) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node");
+ return;
+ }
+
+ if (strcmp(tokens[2], "none") == 0) {
+ np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+ } else {
+ if (softnic_parser_read_uint32(&np.shaper_profile_id, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id");
+ return;
+ }
+ }
+
+ tokens += 3;
+ n_tokens -= 3;
+ } /* shaper profile */
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[0], "shared") == 0) &&
+ (strcmp(tokens[1], "shaper") == 0)) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shared_shaper_id, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id");
+ return;
+ }
+
+ np.shared_shaper_id = &shared_shaper_id;
+ np.n_shared_shapers = 1;
+
+ tokens += 3;
+ n_tokens -= 3;
+ } /* shared shaper */
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[0], "nonleaf") == 0) &&
+ (strcmp(tokens[1], "sp") == 0)) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&np.nonleaf.n_sp_priorities, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_sp_priorities");
+ return;
+ }
+
+ tokens += 3;
+ n_tokens -= 3;
+ } /* nonleaf sp <n_sp_priorities> */
+
+ if (n_tokens) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status != 0)
+ return;
+
+ status = rte_tm_node_add(port_id,
+ node_id,
+ parent_node_id,
+ priority,
+ weight,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &np,
+ &error);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+static uint32_t
+root_node_id(uint32_t n_spp,
+ uint32_t n_pps)
+{
+ uint32_t n_queues = n_spp * n_pps * RTE_SCHED_QUEUES_PER_PIPE;
+ uint32_t n_tc = n_spp * n_pps * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_pipes = n_spp * n_pps;
+
+ return n_queues + n_tc + n_pipes + n_spp;
+}
+
+static uint32_t
+subport_node_id(uint32_t n_spp,
+ uint32_t n_pps,
+ uint32_t subport_id)
+{
+ uint32_t n_pipes = n_spp * n_pps;
+ uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE;
+
+ return n_queues + n_tc + n_pipes + subport_id;
+}
+
+static uint32_t
+pipe_node_id(uint32_t n_spp,
+ uint32_t n_pps,
+ uint32_t subport_id,
+ uint32_t pipe_id)
+{
+ uint32_t n_pipes = n_spp * n_pps;
+ uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE;
+
+ return n_queues +
+ n_tc +
+ pipe_id +
+ subport_id * n_pps;
+}
+
+static uint32_t
+tc_node_id(uint32_t n_spp,
+ uint32_t n_pps,
+ uint32_t subport_id,
+ uint32_t pipe_id,
+ uint32_t tc_id)
+{
+ uint32_t n_pipes = n_spp * n_pps;
+ uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE;
+
+ return n_queues +
+ tc_id +
+ (pipe_id + subport_id * n_pps) * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+}
+
+static uint32_t
+queue_node_id(uint32_t n_spp __rte_unused,
+ uint32_t n_pps,
+ uint32_t subport_id,
+ uint32_t pipe_id,
+ uint32_t tc_id,
+ uint32_t queue_id)
+{
+ return queue_id +
+ tc_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE +
+ (pipe_id + subport_id * n_pps) * RTE_SCHED_QUEUES_PER_PIPE;
+}
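+
+/*
+ * The helpers above flatten the scheduler hierarchy into a single node ID
+ * space: all queues first, then all traffic classes, then all pipes, then
+ * all subports, with the root node last. Illustrative walk-through
+ * (assuming the RTE_SCHED defaults of 4 traffic classes and 16 queues per
+ * pipe) for n_spp = 1 and n_pps = 2:
+ *    queue nodes:   IDs  0..31  (2 pipes x 16 queues)
+ *    TC nodes:      IDs 32..39  (2 pipes x 4 TCs)
+ *    pipe nodes:    IDs 40..41
+ *    subport node:  ID  42
+ *    root node:     ID  43 = root_node_id(1, 2)
+ */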
+
+struct tmgr_hierarchy_default_params {
+ uint32_t n_spp; /**< Number of subports per port. */
+ uint32_t n_pps; /**< Number of pipes per subport. */
+
+ struct {
+ uint32_t port;
+ uint32_t subport;
+ uint32_t pipe;
+ uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ } shaper_profile_id;
+
+ struct {
+ uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t tc_valid[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ } shared_shaper_id;
+
+ struct {
+ uint32_t queue[RTE_SCHED_QUEUES_PER_PIPE];
+ } weight;
+};
+
+static int
+tmgr_hierarchy_default(struct pmd_internals *softnic,
+ struct tmgr_hierarchy_default_params *params)
+{
+ struct rte_tm_node_params root_node_params = {
+ .shaper_profile_id = params->shaper_profile_id.port,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ };
+
+ struct rte_tm_node_params subport_node_params = {
+ .shaper_profile_id = params->shaper_profile_id.subport,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ };
+
+ struct rte_tm_node_params pipe_node_params = {
+ .shaper_profile_id = params->shaper_profile_id.pipe,
+ .nonleaf = {
+ .n_sp_priorities = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ },
+ };
+
+ struct rte_tm_node_params tc_node_params[] = {
+ [0] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[0],
+ .shared_shaper_id = &params->shared_shaper_id.tc[0],
+ .n_shared_shapers =
+				params->shared_shaper_id.tc_valid[0] ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+
+ [1] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[1],
+ .shared_shaper_id = &params->shared_shaper_id.tc[1],
+ .n_shared_shapers =
+				params->shared_shaper_id.tc_valid[1] ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+
+ [2] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[2],
+ .shared_shaper_id = &params->shared_shaper_id.tc[2],
+ .n_shared_shapers =
+				params->shared_shaper_id.tc_valid[2] ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+
+ [3] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[3],
+ .shared_shaper_id = &params->shared_shaper_id.tc[3],
+ .n_shared_shapers =
+				params->shared_shaper_id.tc_valid[3] ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+ };
+
+ struct rte_tm_node_params queue_node_params = {
+ .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
+ };
+
+ struct rte_tm_error error;
+ uint32_t n_spp = params->n_spp, n_pps = params->n_pps, s;
+ int status;
+ uint16_t port_id;
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 0: Root node */
+ status = rte_tm_node_add(port_id,
+ root_node_id(n_spp, n_pps),
+ RTE_TM_NODE_ID_NULL,
+ 0,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &root_node_params,
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 1: Subport nodes */
+ for (s = 0; s < params->n_spp; s++) {
+ uint32_t p;
+
+ status = rte_tm_node_add(port_id,
+ subport_node_id(n_spp, n_pps, s),
+ root_node_id(n_spp, n_pps),
+ 0,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &subport_node_params,
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 2: Pipe nodes */
+ for (p = 0; p < params->n_pps; p++) {
+ uint32_t t;
+
+ status = rte_tm_node_add(port_id,
+ pipe_node_id(n_spp, n_pps, s, p),
+ subport_node_id(n_spp, n_pps, s),
+ 0,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &pipe_node_params,
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 3: Traffic class nodes */
+ for (t = 0; t < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; t++) {
+ uint32_t q;
+
+ status = rte_tm_node_add(port_id,
+ tc_node_id(n_spp, n_pps, s, p, t),
+ pipe_node_id(n_spp, n_pps, s, p),
+ t,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &tc_node_params[t],
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 4: Queue nodes */
+ for (q = 0; q < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; q++) {
+ status = rte_tm_node_add(port_id,
+ queue_node_id(n_spp, n_pps, s, p, t, q),
+ tc_node_id(n_spp, n_pps, s, p, t),
+ 0,
+ params->weight.queue[q],
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &queue_node_params,
+ &error);
+ if (status)
+ return -1;
+ } /* Queue */
+ } /* TC */
+ } /* Pipe */
+ } /* Subport */
+
+ return 0;
+}
+
+/**
+ * tmgr hierarchy-default
+ * spp <n_subports_per_port>
+ * pps <n_pipes_per_subport>
+ * shaper profile
+ * port <profile_id>
+ * subport <profile_id>
+ * pipe <profile_id>
+ * tc0 <profile_id>
+ * tc1 <profile_id>
+ * tc2 <profile_id>
+ * tc3 <profile_id>
+ * shared shaper
+ * tc0 <id | none>
+ * tc1 <id | none>
+ * tc2 <id | none>
+ * tc3 <id | none>
+ * weight
+ * queue <q0> ... <q15>
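+ *
+ * Illustrative invocation (profile IDs refer to previously created shaper
+ * profiles; all values are examples only), building 1 subport with 4096
+ * pipes and no shared shapers:
+ *    tmgr hierarchy-default spp 1 pps 4096
+ *       shaper profile port 0 subport 1 pipe 2 tc0 3 tc1 3 tc2 3 tc3 3
+ *       shared shaper tc0 none tc1 none tc2 none tc3 none
+ *       weight queue 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ * (shown wrapped here; the CLI expects it as a single line)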
+ */
+static void
+cmd_tmgr_hierarchy_default(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct tmgr_hierarchy_default_params p;
+ int i, status;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens != 50) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "hierarchy-default") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy-default");
+ return;
+ }
+
+ if (strcmp(tokens[2], "spp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.n_spp, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_subports_per_port");
+ return;
+ }
+
+ if (strcmp(tokens[4], "pps") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.n_pps, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_pipes_per_subport");
+ return;
+ }
+
+ /* Shaper profile */
+
+ if (strcmp(tokens[6], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[7], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (strcmp(tokens[8], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.port, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port profile id");
+ return;
+ }
+
+ if (strcmp(tokens[10], "subport") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "subport");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.subport, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "subport profile id");
+ return;
+ }
+
+ if (strcmp(tokens[12], "pipe") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipe");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.pipe, tokens[13]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pipe_profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[14], "tc0") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[0], tokens[15]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc0 profile id");
+ return;
+ }
+
+ if (strcmp(tokens[16], "tc1") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[1], tokens[17]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc1 profile id");
+ return;
+ }
+
+ if (strcmp(tokens[18], "tc2") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[2], tokens[19]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc2 profile id");
+ return;
+ }
+
+ if (strcmp(tokens[20], "tc3") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[3], tokens[21]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc3 profile id");
+ return;
+ }
+
+ /* Shared shaper */
+
+ if (strcmp(tokens[22], "shared") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared");
+ return;
+ }
+
+ if (strcmp(tokens[23], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[24], "tc0") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0");
+ return;
+ }
+
+ if (strcmp(tokens[25], "none") == 0)
+ p.shared_shaper_id.tc_valid[0] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[0], tokens[25]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc0");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[0] = 1;
+ }
+
+ if (strcmp(tokens[26], "tc1") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1");
+ return;
+ }
+
+ if (strcmp(tokens[27], "none") == 0)
+ p.shared_shaper_id.tc_valid[1] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[1], tokens[27]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc1");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[1] = 1;
+ }
+
+ if (strcmp(tokens[28], "tc2") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2");
+ return;
+ }
+
+ if (strcmp(tokens[29], "none") == 0)
+ p.shared_shaper_id.tc_valid[2] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[2], tokens[29]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc2");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[2] = 1;
+ }
+
+ if (strcmp(tokens[30], "tc3") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3");
+ return;
+ }
+
+ if (strcmp(tokens[31], "none") == 0)
+ p.shared_shaper_id.tc_valid[3] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[3], tokens[31]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc3");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[3] = 1;
+ }
+
+ /* Weight */
+
+ if (strcmp(tokens[32], "weight") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight");
+ return;
+ }
+
+ if (strcmp(tokens[33], "queue") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "queue");
+ return;
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (softnic_parser_read_uint32(&p.weight.queue[i], tokens[34 + i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "weight queue");
+ return;
+ }
+ }
+
+ status = tmgr_hierarchy_default(softnic, &p);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr hierarchy commit
+ */
+static void
+cmd_tmgr_hierarchy_commit(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_error error;
+ uint16_t port_id;
+ int status;
+
+ if (n_tokens != 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "hierarchy") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy");
+ return;
+ }
+
+ if (strcmp(tokens[2], "commit") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "commit");
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status != 0)
+ return;
+
+ status = rte_tm_hierarchy_commit(port_id, 1, &error);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr <tmgr_name>
+ */
+static void
+cmd_tmgr(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *name;
+ struct softnic_tmgr_port *tmgr_port;
+
+ if (n_tokens != 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ tmgr_port = softnic_tmgr_port_create(softnic, name);
+ if (tmgr_port == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tap <tap_name>
+ */
+static void
+cmd_tap(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *name;
+ struct softnic_tap *tap;
+
+ if (n_tokens != 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ tap = softnic_tap_create(softnic, name);
+ if (tap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * port in action profile <profile_name>
+ * [filter match | mismatch offset <key_offset> mask <key_mask> key <key_value> port <port_id>]
+ * [balance offset <key_offset> mask <key_mask> port <port_id0> ... <port_id15>]
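+ *
+ * Illustrative invocation (offset, mask, key and port ID are examples
+ * only), setting up a 16-byte filter at offset 278 that acts on match:
+ *    port in action profile AP0 filter match offset 278
+ *       mask FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
+ *       key 000102030405060708090A0B0C0D0E0F port 1
+ * (shown wrapped here; the CLI expects it as a single line)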
+ */
+static void
+cmd_port_in_action_profile(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_port_in_action_profile_params p;
+ struct softnic_port_in_action_profile *ap;
+ char *name;
+ uint32_t t0;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (strcmp(tokens[2], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action");
+ return;
+ }
+
+ if (strcmp(tokens[3], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ name = tokens[4];
+
+ t0 = 5;
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "filter") == 0)) {
+ uint32_t size;
+
+ if (n_tokens < t0 + 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "port in action profile filter");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "match") == 0) {
+ p.fltr.filter_on_match = 1;
+ } else if (strcmp(tokens[t0 + 1], "mismatch") == 0) {
+ p.fltr.filter_on_match = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match or mismatch");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.fltr.key_offset,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE;
+ if ((softnic_parse_hex_string(tokens[t0 + 5],
+ p.fltr.key_mask, &size) != 0) ||
+ size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 6], "key") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key");
+ return;
+ }
+
+ size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE;
+ if ((softnic_parse_hex_string(tokens[t0 + 7],
+ p.fltr.key, &size) != 0) ||
+ size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_value");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 8], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.fltr.port_id,
+ tokens[t0 + 9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_FLTR;
+ t0 += 10;
+ } /* filter */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "balance") == 0)) {
+ uint32_t i;
+
+ if (n_tokens < t0 + 22) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "port in action profile balance");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.lb.key_offset,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX;
+ if (softnic_parse_hex_string(tokens[t0 + 4],
+ p.lb.key_mask, &p.lb.key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ for (i = 0; i < 16; i++)
+ if (softnic_parser_read_uint32(&p.lb.port_id[i],
+ tokens[t0 + 6 + i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_LB;
+ t0 += 22;
+ } /* balance */
+
+ if (t0 < n_tokens) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ ap = softnic_port_in_action_profile_create(softnic, name, &p);
+ if (ap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * table action profile <profile_name>
+ * ipv4 | ipv6
+ * offset <ip_offset>
+ * fwd
+ * [balance offset <key_offset> mask <key_mask> outoffset <out_offset>]
+ * [meter srtcm | trtcm
+ * tc <n_tc>
+ * stats none | pkts | bytes | both]
+ * [tm spp <n_subports_per_port> pps <n_pipes_per_subport>]
+ * [encap ether | vlan | qinq | mpls | pppoe]
+ * [nat src | dst
+ * proto udp | tcp]
+ * [ttl drop | fwd
+ * stats none | pkts]
+ * [stats pkts | bytes | both]
+ * [time]
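+ *
+ * Illustrative invocations (names, offsets and sizes are examples only):
+ *    table action profile AP0 ipv4 offset 270 fwd
+ *    table action profile AP1 ipv4 offset 270 fwd meter trtcm tc 4
+ *       stats both time
+ * (each command is a single line in the CLI)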
+ */
+static void
+cmd_table_action_profile(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_action_profile_params p;
+ struct softnic_table_action_profile *ap;
+ char *name;
+ uint32_t t0;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action");
+ return;
+ }
+
+ if (strcmp(tokens[2], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ name = tokens[3];
+
+ if (strcmp(tokens[4], "ipv4") == 0) {
+ p.common.ip_version = 1;
+ } else if (strcmp(tokens[4], "ipv6") == 0) {
+ p.common.ip_version = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[5], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.common.ip_offset,
+ tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ip_offset");
+ return;
+ }
+
+ if (strcmp(tokens[7], "fwd") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "fwd");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
+
+ t0 = 8;
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "balance") == 0)) {
+ if (n_tokens < t0 + 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "table action profile balance");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.lb.key_offset,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX;
+ if (softnic_parse_hex_string(tokens[t0 + 4],
+ p.lb.key_mask, &p.lb.key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "outoffset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "outoffset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.lb.out_offset,
+ tokens[t0 + 6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "out_offset");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_LB;
+ t0 += 7;
+ } /* balance */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "meter") == 0)) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile meter");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "srtcm") == 0) {
+ p.mtr.alg = RTE_TABLE_ACTION_METER_SRTCM;
+ } else if (strcmp(tokens[t0 + 1], "trtcm") == 0) {
+ p.mtr.alg = RTE_TABLE_ACTION_METER_TRTCM;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "srtcm or trtcm");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "tc") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.mtr.n_tc,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_tc");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "none") == 0) {
+ p.mtr.n_packets_enabled = 0;
+ p.mtr.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 5], "pkts") == 0) {
+ p.mtr.n_packets_enabled = 1;
+ p.mtr.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 5], "bytes") == 0) {
+ p.mtr.n_packets_enabled = 0;
+ p.mtr.n_bytes_enabled = 1;
+ } else if (strcmp(tokens[t0 + 5], "both") == 0) {
+ p.mtr.n_packets_enabled = 1;
+ p.mtr.n_bytes_enabled = 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "none or pkts or bytes or both");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_MTR;
+ t0 += 6;
+ } /* meter */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "tm") == 0)) {
+ if (n_tokens < t0 + 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile tm");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "spp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.tm.n_subports_per_port,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_subports_per_port");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "pps") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.tm.n_pipes_per_subport,
+ tokens[t0 + 4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_pipes_per_subport");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TM;
+ t0 += 5;
+ } /* tm */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "encap") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "action profile encap");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "ether") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER;
+ } else if (strcmp(tokens[t0 + 1], "vlan") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN;
+ } else if (strcmp(tokens[t0 + 1], "qinq") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ;
+ } else if (strcmp(tokens[t0 + 1], "mpls") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS;
+ } else if (strcmp(tokens[t0 + 1], "pppoe") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE;
+ } else {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "encap");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_ENCAP;
+ t0 += 2;
+ } /* encap */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "nat") == 0)) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile nat");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "src") == 0) {
+ p.nat.source_nat = 1;
+ } else if (strcmp(tokens[t0 + 1], "dst") == 0) {
+ p.nat.source_nat = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "src or dst");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "proto") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "proto");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "tcp") == 0) {
+ p.nat.proto = 0x06;
+ } else if (strcmp(tokens[t0 + 3], "udp") == 0) {
+ p.nat.proto = 0x11;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "tcp or udp");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_NAT;
+ t0 += 4;
+ } /* nat */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "ttl") == 0)) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile ttl");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "drop") == 0) {
+ p.ttl.drop = 1;
+ } else if (strcmp(tokens[t0 + 1], "fwd") == 0) {
+ p.ttl.drop = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "drop or fwd");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "none") == 0) {
+ p.ttl.n_packets_enabled = 0;
+ } else if (strcmp(tokens[t0 + 3], "pkts") == 0) {
+ p.ttl.n_packets_enabled = 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "none or pkts");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TTL;
+ t0 += 4;
+ } /* ttl */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "stats") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "pkts") == 0) {
+ p.stats.n_packets_enabled = 1;
+ p.stats.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 1], "bytes") == 0) {
+ p.stats.n_packets_enabled = 0;
+ p.stats.n_bytes_enabled = 1;
+ } else if (strcmp(tokens[t0 + 1], "both") == 0) {
+ p.stats.n_packets_enabled = 1;
+ p.stats.n_bytes_enabled = 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "pkts or bytes or both");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_STATS;
+ t0 += 2;
+ } /* stats */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "time") == 0)) {
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TIME;
+ t0 += 1;
+ } /* time */
+
+ if (t0 < n_tokens) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ ap = softnic_table_action_profile_create(softnic, name, &p);
+ if (ap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name>
+ * period <timer_period_ms>
+ * offset_port_id <offset_port_id>
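+ *
+ * Illustrative invocation (name and values are examples only):
+ *    pipeline PIPELINE0 period 10 offset_port_id 0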
+ */
+static void
+cmd_pipeline(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct pipeline_params p;
+ char *name;
+ struct pipeline *pipeline;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "period") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "period");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.timer_period_ms,
+ tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "timer_period_ms");
+ return;
+ }
+
+ if (strcmp(tokens[4], "offset_port_id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset_port_id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.offset_port_id,
+ tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "offset_port_id");
+ return;
+ }
+
+ pipeline = softnic_pipeline_create(softnic, name, &p);
+ if (pipeline == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in
+ * bsz <burst_size>
+ * link <link_name> rxq <queue_id>
+ * | swq <swq_name>
+ * | tmgr <tmgr_name>
+ * | tap <tap_name> mempool <mempool_name> mtu <mtu>
+ * | source mempool <mempool_name> file <file_name> bpp <n_bytes_per_pkt>
+ * [action <port_in_action_profile_name>]
+ * [disabled]
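+ *
+ * Illustrative invocations (names and IDs are examples only):
+ *    pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+ *    pipeline PIPELINE0 port in bsz 32 swq SWQ0 disabled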
+ */
+static void
+cmd_pipeline_port_in(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_port_in_params p;
+ char *pipeline_name;
+ uint32_t t0;
+ int enabled, status;
+
+ if (n_tokens < 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (strcmp(tokens[4], "bsz") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "burst_size");
+ return;
+ }
+
+ t0 = 6;
+
+ if (strcmp(tokens[t0], "link") == 0) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in link");
+ return;
+ }
+
+ p.type = PORT_IN_RXQ;
+
+ p.dev_name = tokens[t0 + 1];
+
+ if (strcmp(tokens[t0 + 2], "rxq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rxq");
+ return;
+ }
+
+ if (softnic_parser_read_uint16(&p.rxq.queue_id,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "queue_id");
+ return;
+ }
+ t0 += 4;
+ } else if (strcmp(tokens[t0], "swq") == 0) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in swq");
+ return;
+ }
+
+ p.type = PORT_IN_SWQ;
+
+ p.dev_name = tokens[t0 + 1];
+
+ t0 += 2;
+ } else if (strcmp(tokens[t0], "tmgr") == 0) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in tmgr");
+ return;
+ }
+
+ p.type = PORT_IN_TMGR;
+
+ p.dev_name = tokens[t0 + 1];
+
+ t0 += 2;
+ } else if (strcmp(tokens[t0], "tap") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in tap");
+ return;
+ }
+
+ p.type = PORT_IN_TAP;
+
+ p.dev_name = tokens[t0 + 1];
+
+ if (strcmp(tokens[t0 + 2], "mempool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mempool");
+ return;
+ }
+
+ p.tap.mempool_name = tokens[t0 + 3];
+
+ if (strcmp(tokens[t0 + 4], "mtu") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mtu");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.tap.mtu,
+ tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "mtu");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "source") == 0) {
+		if (n_tokens < t0 + 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in source");
+ return;
+ }
+
+ p.type = PORT_IN_SOURCE;
+
+ p.dev_name = NULL;
+
+ if (strcmp(tokens[t0 + 1], "mempool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mempool");
+ return;
+ }
+
+ p.source.mempool_name = tokens[t0 + 2];
+
+ if (strcmp(tokens[t0 + 3], "file") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "file");
+ return;
+ }
+
+ p.source.file_name = tokens[t0 + 4];
+
+ if (strcmp(tokens[t0 + 5], "bpp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "bpp");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.source.n_bytes_per_pkt,
+ tokens[t0 + 6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_bytes_per_pkt");
+ return;
+ }
+
+ t0 += 7;
+ } else {
+		snprintf(out, out_size, MSG_ARG_INVALID, tokens[t0]);
+ return;
+ }
+
+ p.action_profile_name = NULL;
+ if (n_tokens > t0 &&
+ (strcmp(tokens[t0], "action") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "action");
+ return;
+ }
+
+ p.action_profile_name = tokens[t0 + 1];
+
+ t0 += 2;
+ }
+
+ enabled = 1;
+ if (n_tokens > t0 &&
+ (strcmp(tokens[t0], "disabled") == 0)) {
+ enabled = 0;
+
+ t0 += 1;
+ }
+
+ if (n_tokens != t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_port_in_create(softnic,
+ pipeline_name,
+ &p,
+ enabled);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port out
+ * bsz <burst_size>
+ * link <link_name> txq <txq_id>
+ * | swq <swq_name>
+ * | tmgr <tmgr_name>
+ * | tap <tap_name>
+ * | sink [file <file_name> pkts <max_n_pkts>]
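+ *
+ * Illustrative invocations (names and IDs are examples only):
+ *    pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+ *    pipeline PIPELINE0 port out bsz 32 sink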
+ */
+static void
+cmd_pipeline_port_out(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_port_out_params p;
+ char *pipeline_name;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "out") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out");
+ return;
+ }
+
+ if (strcmp(tokens[4], "bsz") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "burst_size");
+ return;
+ }
+
+ if (strcmp(tokens[6], "link") == 0) {
+ if (n_tokens != 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out link");
+ return;
+ }
+
+ p.type = PORT_OUT_TXQ;
+
+ p.dev_name = tokens[7];
+
+ if (strcmp(tokens[8], "txq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "txq");
+ return;
+ }
+
+ if (softnic_parser_read_uint16(&p.txq.queue_id,
+ tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_id");
+ return;
+ }
+ } else if (strcmp(tokens[6], "swq") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out swq");
+ return;
+ }
+
+ p.type = PORT_OUT_SWQ;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "tmgr") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out tmgr");
+ return;
+ }
+
+ p.type = PORT_OUT_TMGR;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "tap") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out tap");
+ return;
+ }
+
+ p.type = PORT_OUT_TAP;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "sink") == 0) {
+ if ((n_tokens != 7) && (n_tokens != 11)) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out sink");
+ return;
+ }
+
+ p.type = PORT_OUT_SINK;
+
+ p.dev_name = NULL;
+
+ if (n_tokens == 7) {
+ p.sink.file_name = NULL;
+ p.sink.max_n_pkts = 0;
+ } else {
+ if (strcmp(tokens[7], "file") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "file");
+ return;
+ }
+
+ p.sink.file_name = tokens[8];
+
+ if (strcmp(tokens[9], "pkts") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pkts");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.sink.max_n_pkts,
+ tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "max_n_pkts");
+ return;
+ }
+ }
+ } else {
+		snprintf(out, out_size, MSG_ARG_INVALID, tokens[6]);
+ return;
+ }
+
+ status = softnic_pipeline_port_out_create(softnic, pipeline_name, &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table
+ * match
+ * acl
+ * ipv4 | ipv6
+ * offset <ip_header_offset>
+ * size <n_rules>
+ * | array
+ * offset <key_offset>
+ * size <n_keys>
+ * | hash
+ * ext | lru
+ * key <key_size>
+ * mask <key_mask>
+ * offset <key_offset>
+ * buckets <n_buckets>
+ * size <n_keys>
+ * | lpm
+ * ipv4 | ipv6
+ * offset <ip_header_offset>
+ * size <n_rules>
+ * | stub
+ * [action <table_action_profile_name>]
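+ *
+ * Illustrative invocations (mask, offsets and sizes are examples only;
+ * the hash key mask must be exactly <key_size> bytes of hex):
+ *    pipeline PIPELINE0 table match stub action AP0
+ *    pipeline PIPELINE0 table match hash ext key 16
+ *       mask 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF
+ *       offset 278 buckets 16384 size 65536 action AP0
+ * (each command is a single line in the CLI)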
+ */
+static void
+cmd_pipeline_table(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ uint8_t key_mask[TABLE_RULE_MATCH_SIZE_MAX];
+ struct softnic_table_params p;
+ char *pipeline_name;
+ uint32_t t0;
+ int status;
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (strcmp(tokens[3], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match");
+ return;
+ }
+
+ t0 = 4;
+ if (strcmp(tokens[t0], "acl") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table acl");
+ return;
+ }
+
+ p.match_type = TABLE_ACL;
+
+ if (strcmp(tokens[t0 + 1], "ipv4") == 0) {
+ p.match.acl.ip_version = 1;
+ } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) {
+ p.match.acl.ip_version = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.acl.ip_header_offset,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "ip_header_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.acl.n_rules,
+ tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "array") == 0) {
+ if (n_tokens < t0 + 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table array");
+ return;
+ }
+
+ p.match_type = TABLE_ARRAY;
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.array.key_offset,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.array.n_keys,
+ tokens[t0 + 4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_keys");
+ return;
+ }
+
+ t0 += 5;
+ } else if (strcmp(tokens[t0], "hash") == 0) {
+ uint32_t key_mask_size = TABLE_RULE_MATCH_SIZE_MAX;
+
+ if (n_tokens < t0 + 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table hash");
+ return;
+ }
+
+ p.match_type = TABLE_HASH;
+
+ if (strcmp(tokens[t0 + 1], "ext") == 0) {
+ p.match.hash.extendable_bucket = 1;
+ } else if (strcmp(tokens[t0 + 1], "lru") == 0) {
+ p.match.hash.extendable_bucket = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ext or lru");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "key") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key");
+ return;
+ }
+
+ if ((softnic_parser_read_uint32(&p.match.hash.key_size,
+ tokens[t0 + 3]) != 0) ||
+ p.match.hash.key_size == 0 ||
+ p.match.hash.key_size > TABLE_RULE_MATCH_SIZE_MAX) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_size");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ if ((softnic_parse_hex_string(tokens[t0 + 5],
+ key_mask, &key_mask_size) != 0) ||
+ key_mask_size != p.match.hash.key_size) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+ p.match.hash.key_mask = key_mask;
+
+ if (strcmp(tokens[t0 + 6], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.hash.key_offset,
+ tokens[t0 + 7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 8], "buckets") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buckets");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.hash.n_buckets,
+ tokens[t0 + 9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_buckets");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 10], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.hash.n_keys,
+ tokens[t0 + 11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_keys");
+ return;
+ }
+
+ t0 += 12;
+ } else if (strcmp(tokens[t0], "lpm") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table lpm");
+ return;
+ }
+
+ p.match_type = TABLE_LPM;
+
+ if (strcmp(tokens[t0 + 1], "ipv4") == 0) {
+ p.match.lpm.key_size = 4;
+ } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) {
+ p.match.lpm.key_size = 16;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.lpm.key_offset,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.lpm.n_rules,
+ tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "stub") == 0) {
+ p.match_type = TABLE_STUB;
+
+ t0 += 1;
+ } else {
+		snprintf(out, out_size, MSG_ARG_INVALID, tokens[t0]);
+ return;
+ }
+
+ p.action_profile_name = NULL;
+ if (n_tokens > t0 &&
+ (strcmp(tokens[t0], "action") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "action");
+ return;
+ }
+
+ p.action_profile_name = tokens[t0 + 1];
+
+ t0 += 2;
+ }
+
+ if (n_tokens > t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_create(softnic, pipeline_name, &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> table <table_id>
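+ *
+ * Illustrative invocation (IDs are examples only):
+ *    pipeline PIPELINE0 port in 0 table 0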
+ */
+static void
+cmd_pipeline_port_in_table(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id, table_id;
+ int status;
+
+ if (n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ status = softnic_pipeline_port_in_connect_to_table(softnic,
+ pipeline_name,
+ port_id,
+ table_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> stats read [clear]
+ */
+
+#define MSG_PIPELINE_PORT_IN_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts dropped by AH: %" PRIu64 "\n" \
+ "Pkts dropped by other: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_port_in_stats(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_port_in_stats stats;
+ char *pipeline_name;
+ uint32_t port_id;
+ int clear, status;
+
+ if (n_tokens != 7 &&
+ n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[6], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 8) {
+ if (strcmp(tokens[7], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = softnic_pipeline_port_in_stats_read(softnic,
+ pipeline_name,
+ port_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_PORT_IN_STATS,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> enable
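+ *
+ * Example (PIPELINE0 and port 0 are placeholder values); the disable command
+ * below follows the same layout:
+ *     pipeline PIPELINE0 port in 0 enable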
+ */
+static void
+cmd_softnic_pipeline_port_in_enable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "enable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable");
+ return;
+ }
+
+ status = softnic_pipeline_port_in_enable(softnic, pipeline_name, port_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> disable
+ */
+static void
+cmd_softnic_pipeline_port_in_disable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "disable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable");
+ return;
+ }
+
+ status = softnic_pipeline_port_in_disable(softnic, pipeline_name, port_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port out <port_id> stats read [clear]
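+ *
+ * Example (PIPELINE0 and port 0 are placeholder values):
+ *     pipeline PIPELINE0 port out 0 stats read clear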
+ */
+#define MSG_PIPELINE_PORT_OUT_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts dropped by AH: %" PRIu64 "\n" \
+ "Pkts dropped by other: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_port_out_stats(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_port_out_stats stats;
+ char *pipeline_name;
+ uint32_t port_id;
+ int clear, status;
+
+ if (n_tokens != 7 &&
+ n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "out") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[6], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 8) {
+ if (strcmp(tokens[7], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = softnic_pipeline_port_out_stats_read(softnic,
+ pipeline_name,
+ port_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_PORT_OUT_STATS,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> stats read [clear]
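+ *
+ * Example (PIPELINE0 and table 0 are placeholder values):
+ *     pipeline PIPELINE0 table 0 stats read clear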
+ */
+#define MSG_PIPELINE_TABLE_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts in with lookup miss: %" PRIu64 "\n" \
+ "Pkts in with lookup hit dropped by AH: %" PRIu64 "\n" \
+ "Pkts in with lookup hit dropped by others: %" PRIu64 "\n" \
+ "Pkts in with lookup miss dropped by AH: %" PRIu64 "\n" \
+ "Pkts in with lookup miss dropped by others: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_table_stats(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_table_stats stats;
+ char *pipeline_name;
+ uint32_t table_id;
+ int clear, status;
+
+ if (n_tokens != 6 &&
+ n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[5], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 7) {
+ if (strcmp(tokens[6], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = softnic_pipeline_table_stats_read(softnic,
+ pipeline_name,
+ table_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_TABLE_STATS,
+ stats.stats.n_pkts_in,
+ stats.stats.n_pkts_lookup_miss,
+ stats.n_pkts_dropped_by_lkp_hit_ah,
+ stats.n_pkts_dropped_lkp_hit,
+ stats.n_pkts_dropped_by_lkp_miss_ah,
+ stats.n_pkts_dropped_lkp_miss);
+}
+
+/**
+ * <match> ::=
+ *
+ * match
+ * acl
+ * priority <priority>
+ * ipv4 | ipv6 <sa> <sa_depth> <da> <da_depth>
+ * <sp0> <sp1> <dp0> <dp1> <proto>
+ * | array <pos>
+ * | hash
+ * raw <key>
+ * | ipv4_5tuple <sa> <da> <sp> <dp> <proto>
+ * | ipv6_5tuple <sa> <da> <sp> <dp> <proto>
+ * | ipv4_addr <addr>
+ * | ipv6_addr <addr>
+ * | qinq <svlan> <cvlan>
+ * | lpm
+ * ipv4 | ipv6 <addr> <depth>
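+ *
+ * Examples (all addresses, ports and IDs are placeholder values):
+ *     match lpm ipv4 10.0.0.0 24
+ *     match hash ipv4_5tuple 10.0.0.1 10.0.0.2 100 200 6
+ *     match acl priority 0 ipv4 10.0.0.0 24 10.1.0.0 24 0 65535 0 65535 6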
+ */
+struct pkt_key_qinq {
+ uint16_t ethertype_svlan;
+ uint16_t svlan;
+ uint16_t ethertype_cvlan;
+ uint16_t cvlan;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv4_5tuple {
+ uint8_t time_to_live;
+ uint8_t proto;
+ uint16_t hdr_checksum;
+ uint32_t sa;
+ uint32_t da;
+ uint16_t sp;
+ uint16_t dp;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv6_5tuple {
+ uint16_t payload_length;
+ uint8_t proto;
+ uint8_t hop_limit;
+ uint8_t sa[16];
+ uint8_t da[16];
+ uint16_t sp;
+ uint16_t dp;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv4_addr {
+ uint32_t addr;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv6_addr {
+ uint8_t addr[16];
+} __attribute__((__packed__));
+
+static uint32_t
+parse_match(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size,
+ struct softnic_table_rule_match *m)
+{
+ memset(m, 0, sizeof(*m));
+
+ if (n_tokens < 2)
+ return 0;
+
+ if (strcmp(tokens[0], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match");
+ return 0;
+ }
+
+ if (strcmp(tokens[1], "acl") == 0) {
+ if (n_tokens < 14) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_ACL;
+
+ if (strcmp(tokens[2], "priority") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint32(&m->match.acl.priority,
+ tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "priority");
+ return 0;
+ }
+
+ if (strcmp(tokens[4], "ipv4") == 0) {
+ struct in_addr saddr, daddr;
+
+ m->match.acl.ip_version = 1;
+
+ if (softnic_parse_ipv4_addr(tokens[5], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+ m->match.acl.ipv4.sa = rte_be_to_cpu_32(saddr.s_addr);
+
+ if (softnic_parse_ipv4_addr(tokens[7], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+ m->match.acl.ipv4.da = rte_be_to_cpu_32(daddr.s_addr);
+ } else if (strcmp(tokens[4], "ipv6") == 0) {
+ struct in6_addr saddr, daddr;
+
+ m->match.acl.ip_version = 0;
+
+ if (softnic_parse_ipv6_addr(tokens[5], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+ memcpy(m->match.acl.ipv6.sa, saddr.s6_addr, 16);
+
+ if (softnic_parse_ipv6_addr(tokens[7], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+ memcpy(m->match.acl.ipv6.da, daddr.s6_addr, 16);
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint32(&m->match.acl.sa_depth,
+ tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa_depth");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint32(&m->match.acl.da_depth,
+ tokens[8]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da_depth");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.sp0, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp0");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.sp1, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp1");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.dp0, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp0");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.dp1, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp1");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&m->match.acl.proto, tokens[13]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "proto");
+ return 0;
+ }
+
+ m->match.acl.proto_mask = 0xff;
+
+ return 14;
+ } /* acl */
+
+ if (strcmp(tokens[1], "array") == 0) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_ARRAY;
+
+ if (softnic_parser_read_uint32(&m->match.array.pos, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pos");
+ return 0;
+ }
+
+ return 3;
+ } /* array */
+
+ if (strcmp(tokens[1], "hash") == 0) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_HASH;
+
+ if (strcmp(tokens[2], "raw") == 0) {
+ uint32_t key_size = TABLE_RULE_MATCH_SIZE_MAX;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_hex_string(tokens[3],
+ m->match.hash.key, &key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key");
+ return 0;
+ }
+
+ return 4;
+ } /* hash raw */
+
+ if (strcmp(tokens[2], "ipv4_5tuple") == 0) {
+ struct pkt_key_ipv4_5tuple *ipv4 =
+ (struct pkt_key_ipv4_5tuple *)m->match.hash.key;
+ struct in_addr saddr, daddr;
+ uint16_t sp, dp;
+ uint8_t proto;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv4_addr(tokens[3], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+
+ if (softnic_parse_ipv4_addr(tokens[4], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "proto");
+ return 0;
+ }
+
+ ipv4->sa = saddr.s_addr;
+ ipv4->da = daddr.s_addr;
+ ipv4->sp = rte_cpu_to_be_16(sp);
+ ipv4->dp = rte_cpu_to_be_16(dp);
+ ipv4->proto = proto;
+
+ return 8;
+ } /* hash ipv4_5tuple */
+
+ if (strcmp(tokens[2], "ipv6_5tuple") == 0) {
+ struct pkt_key_ipv6_5tuple *ipv6 =
+ (struct pkt_key_ipv6_5tuple *)m->match.hash.key;
+ struct in6_addr saddr, daddr;
+ uint16_t sp, dp;
+ uint8_t proto;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv6_addr(tokens[3], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+
+ if (softnic_parse_ipv6_addr(tokens[4], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "proto");
+ return 0;
+ }
+
+ memcpy(ipv6->sa, saddr.s6_addr, 16);
+ memcpy(ipv6->da, daddr.s6_addr, 16);
+ ipv6->sp = rte_cpu_to_be_16(sp);
+ ipv6->dp = rte_cpu_to_be_16(dp);
+ ipv6->proto = proto;
+
+ return 8;
+ } /* hash ipv6_5tuple */
+
+ if (strcmp(tokens[2], "ipv4_addr") == 0) {
+ struct pkt_key_ipv4_addr *ipv4_addr =
+ (struct pkt_key_ipv4_addr *)m->match.hash.key;
+ struct in_addr addr;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ ipv4_addr->addr = addr.s_addr;
+
+ return 4;
+ } /* hash ipv4_addr */
+
+ if (strcmp(tokens[2], "ipv6_addr") == 0) {
+ struct pkt_key_ipv6_addr *ipv6_addr =
+ (struct pkt_key_ipv6_addr *)m->match.hash.key;
+ struct in6_addr addr;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ memcpy(ipv6_addr->addr, addr.s6_addr, 16);
+
+ return 4;
+ } /* hash ipv6_addr */
+
+ if (strcmp(tokens[2], "qinq") == 0) {
+ struct pkt_key_qinq *qinq =
+ (struct pkt_key_qinq *)m->match.hash.key;
+ uint16_t svlan, cvlan;
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if ((softnic_parser_read_uint16(&svlan, tokens[3]) != 0) ||
+ svlan > 0xFFF) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "svlan");
+ return 0;
+ }
+
+ if ((softnic_parser_read_uint16(&cvlan, tokens[4]) != 0) ||
+ cvlan > 0xFFF) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "cvlan");
+ return 0;
+ }
+
+ qinq->svlan = rte_cpu_to_be_16(svlan);
+ qinq->cvlan = rte_cpu_to_be_16(cvlan);
+
+ return 5;
+ } /* hash qinq */
+
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ } /* hash */
+
+ if (strcmp(tokens[1], "lpm") == 0) {
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_LPM;
+
+ if (strcmp(tokens[2], "ipv4") == 0) {
+ struct in_addr addr;
+
+ m->match.lpm.ip_version = 1;
+
+ if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ m->match.lpm.ipv4 = rte_be_to_cpu_32(addr.s_addr);
+ } else if (strcmp(tokens[2], "ipv6") == 0) {
+ struct in6_addr addr;
+
+ m->match.lpm.ip_version = 0;
+
+ if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ memcpy(m->match.lpm.ipv6, addr.s6_addr, 16);
+ } else {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "ipv4 or ipv6");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&m->match.lpm.depth, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "depth");
+ return 0;
+ }
+
+ return 5;
+ } /* lpm */
+
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "acl or array or hash or lpm");
+ return 0;
+}
+
+/**
+ * <table_action> ::=
+ *
+ * action
+ * fwd
+ * drop
+ * | port <port_id>
+ * | meta
+ * | table <table_id>
+ * [balance <out0> ... <out7>]
+ * [meter
+ * tc0 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * [tc1 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * tc2 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * tc3 meter <meter_profile_id> policer g <pa> y <pa> r <pa>]]
+ * [tm subport <subport_id> pipe <pipe_id>]
+ * [encap
+ * ether <da> <sa>
+ * | vlan <da> <sa> <pcp> <dei> <vid>
+ * | qinq <da> <sa> <pcp> <dei> <vid> <pcp> <dei> <vid>
+ * | mpls unicast | multicast
+ * <da> <sa>
+ * label0 <label> <tc> <ttl>
+ * [label1 <label> <tc> <ttl>
+ * [label2 <label> <tc> <ttl>
+ * [label3 <label> <tc> <ttl>]]]
+ * | pppoe <da> <sa> <session_id>]
+ * [nat ipv4 | ipv6 <addr> <port>]
+ * [ttl dec | keep]
+ * [stats]
+ * [time]
+ *
+ * where:
+ * <pa> ::= g | y | r | drop
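+ *
+ * Examples (port and table IDs are placeholder values):
+ *     action fwd port 0
+ *     action fwd table 1 ttl dec stats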
+ */
+static uint32_t
+parse_table_action_fwd(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens == 0 ||
+ (strcmp(tokens[0], "fwd") != 0))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens && (strcmp(tokens[0], "drop") == 0)) {
+ a->fwd.action = RTE_PIPELINE_ACTION_DROP;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 1;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "port") == 0)) {
+ uint32_t id;
+
+ if (n_tokens < 2 ||
+ softnic_parser_read_uint32(&id, tokens[1]))
+ return 0;
+
+ a->fwd.action = RTE_PIPELINE_ACTION_PORT;
+ a->fwd.id = id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 2;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "meta") == 0)) {
+ a->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 1;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "table") == 0)) {
+ uint32_t id;
+
+ if (n_tokens < 2 ||
+ softnic_parser_read_uint32(&id, tokens[1]))
+ return 0;
+
+ a->fwd.action = RTE_PIPELINE_ACTION_TABLE;
+ a->fwd.id = id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 2;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_balance(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ uint32_t i;
+
+ if (n_tokens == 0 ||
+ (strcmp(tokens[0], "balance") != 0))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens < RTE_TABLE_ACTION_LB_TABLE_SIZE)
+ return 0;
+
+ for (i = 0; i < RTE_TABLE_ACTION_LB_TABLE_SIZE; i++)
+ if (softnic_parser_read_uint32(&a->lb.out[i], tokens[i]) != 0)
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_LB;
+ return 1 + RTE_TABLE_ACTION_LB_TABLE_SIZE;
+}
+
+static int
+parse_policer_action(char *token, enum rte_table_action_policer *a)
+{
+ if (strcmp(token, "g") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_GREEN;
+ return 0;
+ }
+
+ if (strcmp(token, "y") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_YELLOW;
+ return 0;
+ }
+
+ if (strcmp(token, "r") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_RED;
+ return 0;
+ }
+
+ if (strcmp(token, "drop") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_DROP;
+ return 0;
+ }
+
+ return -1;
+}
+
+static uint32_t
+parse_table_action_meter_tc(char **tokens,
+ uint32_t n_tokens,
+ struct rte_table_action_mtr_tc_params *mtr)
+{
+ if (n_tokens < 9 ||
+ strcmp(tokens[0], "meter") ||
+ softnic_parser_read_uint32(&mtr->meter_profile_id, tokens[1]) ||
+ strcmp(tokens[2], "policer") ||
+ strcmp(tokens[3], "g") ||
+ parse_policer_action(tokens[4], &mtr->policer[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[5], "y") ||
+ parse_policer_action(tokens[6], &mtr->policer[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[7], "r") ||
+ parse_policer_action(tokens[8], &mtr->policer[e_RTE_METER_RED]))
+ return 0;
+
+ return 9;
+}
+
+static uint32_t
+parse_table_action_meter(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "meter"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens < 10 ||
+ strcmp(tokens[0], "tc0") ||
+ (parse_table_action_meter_tc(tokens + 1,
+ n_tokens - 1,
+ &a->mtr.mtr[0]) == 0))
+ return 0;
+
+ tokens += 10;
+ n_tokens -= 10;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "tc1")) {
+ a->mtr.tc_mask = 1;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
+ return 1 + 10;
+ }
+
+ if (n_tokens < 30 ||
+ (parse_table_action_meter_tc(tokens + 1,
+ n_tokens - 1, &a->mtr.mtr[1]) == 0) ||
+ strcmp(tokens[10], "tc2") ||
+ (parse_table_action_meter_tc(tokens + 11,
+ n_tokens - 11, &a->mtr.mtr[2]) == 0) ||
+ strcmp(tokens[20], "tc3") ||
+ (parse_table_action_meter_tc(tokens + 21,
+ n_tokens - 21, &a->mtr.mtr[3]) == 0))
+ return 0;
+
+ a->mtr.tc_mask = 0xF;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
+ return 1 + 10 + 3 * 10;
+}
+
+static uint32_t
+parse_table_action_tm(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ uint32_t subport_id, pipe_id;
+
+ if (n_tokens < 5 ||
+ strcmp(tokens[0], "tm") ||
+ strcmp(tokens[1], "subport") ||
+ softnic_parser_read_uint32(&subport_id, tokens[2]) ||
+ strcmp(tokens[3], "pipe") ||
+ softnic_parser_read_uint32(&pipe_id, tokens[4]))
+ return 0;
+
+ a->tm.subport_id = subport_id;
+ a->tm.pipe_id = pipe_id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TM;
+ return 5;
+}
+
+static uint32_t
+parse_table_action_encap(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "encap"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ /* ether */
+ if (n_tokens && (strcmp(tokens[0], "ether") == 0)) {
+ if (n_tokens < 3 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.ether.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.ether.ether.sa))
+ return 0;
+
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_ETHER;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 3;
+ }
+
+ /* vlan */
+ if (n_tokens && (strcmp(tokens[0], "vlan") == 0)) {
+ uint32_t pcp, dei, vid;
+
+ if (n_tokens < 6 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.vlan.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.vlan.ether.sa) ||
+ softnic_parser_read_uint32(&pcp, tokens[3]) ||
+ pcp > 0x7 ||
+ softnic_parser_read_uint32(&dei, tokens[4]) ||
+ dei > 0x1 ||
+ softnic_parser_read_uint32(&vid, tokens[5]) ||
+ vid > 0xFFF)
+ return 0;
+
+ a->encap.vlan.vlan.pcp = pcp & 0x7;
+ a->encap.vlan.vlan.dei = dei & 0x1;
+ a->encap.vlan.vlan.vid = vid & 0xFFF;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_VLAN;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 6;
+ }
+
+ /* qinq */
+ if (n_tokens && (strcmp(tokens[0], "qinq") == 0)) {
+ uint32_t svlan_pcp, svlan_dei, svlan_vid;
+ uint32_t cvlan_pcp, cvlan_dei, cvlan_vid;
+
+ if (n_tokens < 9 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.qinq.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.qinq.ether.sa) ||
+ softnic_parser_read_uint32(&svlan_pcp, tokens[3]) ||
+ svlan_pcp > 0x7 ||
+ softnic_parser_read_uint32(&svlan_dei, tokens[4]) ||
+ svlan_dei > 0x1 ||
+ softnic_parser_read_uint32(&svlan_vid, tokens[5]) ||
+ svlan_vid > 0xFFF ||
+ softnic_parser_read_uint32(&cvlan_pcp, tokens[6]) ||
+ cvlan_pcp > 0x7 ||
+ softnic_parser_read_uint32(&cvlan_dei, tokens[7]) ||
+ cvlan_dei > 0x1 ||
+ softnic_parser_read_uint32(&cvlan_vid, tokens[8]) ||
+ cvlan_vid > 0xFFF)
+ return 0;
+
+ a->encap.qinq.svlan.pcp = svlan_pcp & 0x7;
+ a->encap.qinq.svlan.dei = svlan_dei & 0x1;
+ a->encap.qinq.svlan.vid = svlan_vid & 0xFFF;
+ a->encap.qinq.cvlan.pcp = cvlan_pcp & 0x7;
+ a->encap.qinq.cvlan.dei = cvlan_dei & 0x1;
+ a->encap.qinq.cvlan.vid = cvlan_vid & 0xFFF;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_QINQ;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 9;
+ }
+
+ /* mpls */
+ if (n_tokens && (strcmp(tokens[0], "mpls") == 0)) {
+ uint32_t label, tc, ttl;
+
+ if (n_tokens < 8)
+ return 0;
+
+ if (strcmp(tokens[1], "unicast") == 0)
+ a->encap.mpls.unicast = 1;
+ else if (strcmp(tokens[1], "multicast") == 0)
+ a->encap.mpls.unicast = 0;
+ else
+ return 0;
+
+ if (softnic_parse_mac_addr(tokens[2], &a->encap.mpls.ether.da) ||
+ softnic_parse_mac_addr(tokens[3], &a->encap.mpls.ether.sa) ||
+ strcmp(tokens[4], "label0") ||
+ softnic_parser_read_uint32(&label, tokens[5]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[6]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[7]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[0].label = label;
+ a->encap.mpls.mpls[0].tc = tc;
+ a->encap.mpls.mpls[0].ttl = ttl;
+
+ tokens += 8;
+ n_tokens -= 8;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "label1")) {
+ a->encap.mpls.mpls_count = 1;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8;
+ }
+
+ if (n_tokens < 4 ||
+ softnic_parser_read_uint32(&label, tokens[1]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[2]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[3]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[1].label = label;
+ a->encap.mpls.mpls[1].tc = tc;
+ a->encap.mpls.mpls[1].ttl = ttl;
+
+ tokens += 4;
+ n_tokens -= 4;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "label2")) {
+ a->encap.mpls.mpls_count = 2;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4;
+ }
+
+ if (n_tokens < 4 ||
+ softnic_parser_read_uint32(&label, tokens[1]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[2]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[3]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[2].label = label;
+ a->encap.mpls.mpls[2].tc = tc;
+ a->encap.mpls.mpls[2].ttl = ttl;
+
+ tokens += 4;
+ n_tokens -= 4;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "label3")) {
+ a->encap.mpls.mpls_count = 3;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4 + 4;
+ }
+
+ if (n_tokens < 4 ||
+ softnic_parser_read_uint32(&label, tokens[1]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[2]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[3]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[3].label = label;
+ a->encap.mpls.mpls[3].tc = tc;
+ a->encap.mpls.mpls[3].ttl = ttl;
+
+ a->encap.mpls.mpls_count = 4;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4 + 4 + 4;
+ }
+
+ /* pppoe */
+ if (n_tokens && (strcmp(tokens[0], "pppoe") == 0)) {
+ if (n_tokens < 4 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.pppoe.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.pppoe.ether.sa) ||
+ softnic_parser_read_uint16(&a->encap.pppoe.pppoe.session_id,
+ tokens[3]))
+ return 0;
+
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_PPPOE;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 4;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_nat(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 4 ||
+ strcmp(tokens[0], "nat"))
+ return 0;
+
+ if (strcmp(tokens[1], "ipv4") == 0) {
+ struct in_addr addr;
+ uint16_t port;
+
+ if (softnic_parse_ipv4_addr(tokens[2], &addr) ||
+ softnic_parser_read_uint16(&port, tokens[3]))
+ return 0;
+
+ a->nat.ip_version = 1;
+ a->nat.addr.ipv4 = rte_be_to_cpu_32(addr.s_addr);
+ a->nat.port = port;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_NAT;
+ return 4;
+ }
+
+ if (strcmp(tokens[1], "ipv6") == 0) {
+ struct in6_addr addr;
+ uint16_t port;
+
+ if (softnic_parse_ipv6_addr(tokens[2], &addr) ||
+ softnic_parser_read_uint16(&port, tokens[3]))
+ return 0;
+
+ a->nat.ip_version = 0;
+ memcpy(a->nat.addr.ipv6, addr.s6_addr, 16);
+ a->nat.port = port;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_NAT;
+ return 4;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_ttl(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 2 ||
+ strcmp(tokens[0], "ttl"))
+ return 0;
+
+ if (strcmp(tokens[1], "dec") == 0)
+ a->ttl.decrement = 1;
+ else if (strcmp(tokens[1], "keep") == 0)
+ a->ttl.decrement = 0;
+ else
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TTL;
+ return 2;
+}
+
+static uint32_t
+parse_table_action_stats(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 1 ||
+ strcmp(tokens[0], "stats"))
+ return 0;
+
+ a->stats.n_packets = 0;
+ a->stats.n_bytes = 0;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
+ return 1;
+}
+
+static uint32_t
+parse_table_action_time(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 1 ||
+ strcmp(tokens[0], "time"))
+ return 0;
+
+ a->time.time = rte_rdtsc();
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TIME;
+ return 1;
+}
+
+static uint32_t
+parse_table_action(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size,
+ struct softnic_table_rule_action *a)
+{
+ uint32_t n_tokens0 = n_tokens;
+
+ memset(a, 0, sizeof(*a));
+
+ if (n_tokens < 2 ||
+ strcmp(tokens[0], "action"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens && (strcmp(tokens[0], "fwd") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_fwd(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action fwd");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "balance") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_balance(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action balance");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "meter") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_meter(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action meter");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "tm") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_tm(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action tm");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "encap") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_encap(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action encap");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "nat") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_nat(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action nat");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "ttl") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_ttl(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action ttl");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "stats") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_stats(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action stats");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "time") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_time(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action time");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens0 - n_tokens == 1) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "action");
+ return 0;
+ }
+
+ return n_tokens0 - n_tokens;
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule add
+ * match <match>
+ * action <table_action>
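+ *
+ * Example (PIPELINE0 and the match/action fields are placeholder values):
+ *     pipeline PIPELINE0 table 0 rule add match lpm ipv4 10.0.0.0 24 action fwd port 0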
+ */
+static void
+cmd_softnic_pipeline_table_rule_add(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_match m;
+ struct softnic_table_rule_action a;
+ char *pipeline_name;
+ void *data;
+ uint32_t table_id, t0, n_tokens_parsed;
+ int status;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ t0 = 6;
+
+ /* match */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ /* action */
+ n_tokens_parsed = parse_table_action(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &a);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ if (t0 != n_tokens) {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_add(softnic,
+ pipeline_name,
+ table_id,
+ &m,
+ &a,
+ &data);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule add
+ * match
+ * default
+ * action
+ * fwd
+ * drop
+ * | port <port_id>
+ * | meta
+ * | table <table_id>
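+ *
+ * Example (PIPELINE0 and table 0 are placeholder values):
+ *     pipeline PIPELINE0 table 0 rule add match default action fwd drop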
+ */
+static void
+cmd_softnic_pipeline_table_rule_add_default(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_action action;
+ void *data;
+ char *pipeline_name;
+ uint32_t table_id;
+ int status;
+
+ if (n_tokens != 11 &&
+ n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[6], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match");
+ return;
+ }
+
+ if (strcmp(tokens[7], "default") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "default");
+ return;
+ }
+
+ if (strcmp(tokens[8], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "action");
+ return;
+ }
+
+ if (strcmp(tokens[9], "fwd") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "fwd");
+ return;
+ }
+
+ action.action_mask = 1 << RTE_TABLE_ACTION_FWD;
+
+ if (strcmp(tokens[10], "drop") == 0) {
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_DROP;
+ } else if (strcmp(tokens[10], "port") == 0) {
+ uint32_t id;
+
+ if (n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&id, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_PORT;
+ action.fwd.id = id;
+ } else if (strcmp(tokens[10], "meta") == 0) {
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_PORT_META;
+ } else if (strcmp(tokens[10], "table") == 0) {
+ uint32_t id;
+
+ if (n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&id, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_TABLE;
+ action.fwd.id = id;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "drop or port or meta or table");
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_add_default(softnic,
+ pipeline_name,
+ table_id,
+ &action,
+ &data);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule add bulk <file_name> <n_rules>
+ *
+ * File <file_name>:
+ * - line format: match <match> action <action>
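+ *
+ * Example (file name and rule count are placeholder values):
+ *     pipeline PIPELINE0 table 0 rule add bulk ./rules.txt 2
+ * with each line of ./rules.txt holding one rule, e.g.:
+ *     match lpm ipv4 10.0.0.0 24 action fwd port 0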
+ */
+static int
+cli_rule_file_process(const char *file_name,
+ size_t line_len_max,
+ struct softnic_table_rule_match *m,
+ struct softnic_table_rule_action *a,
+ uint32_t *n_rules,
+ uint32_t *line_number,
+ char *out,
+ size_t out_size);
+
+static void
+cmd_softnic_pipeline_table_rule_add_bulk(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_match *match;
+ struct softnic_table_rule_action *action;
+ void **data;
+ char *pipeline_name, *file_name;
+ uint32_t table_id, n_rules, n_rules_parsed, line_number;
+ int status;
+
+ if (n_tokens != 9) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[6], "bulk") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "bulk");
+ return;
+ }
+
+ file_name = tokens[7];
+
+ if ((softnic_parser_read_uint32(&n_rules, tokens[8]) != 0) ||
+ n_rules == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ /* Memory allocation. */
+ match = calloc(n_rules, sizeof(struct softnic_table_rule_match));
+ action = calloc(n_rules, sizeof(struct softnic_table_rule_action));
+ data = calloc(n_rules, sizeof(void *));
+ if (match == NULL ||
+ action == NULL ||
+ data == NULL) {
+ snprintf(out, out_size, MSG_OUT_OF_MEMORY);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Load rule file */
+ n_rules_parsed = n_rules;
+ status = cli_rule_file_process(file_name,
+ 1024,
+ match,
+ action,
+ &n_rules_parsed,
+ &line_number,
+ out,
+ out_size);
+ if (status) {
+ snprintf(out, out_size, MSG_FILE_ERR, file_name, line_number);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+ if (n_rules_parsed != n_rules) {
+ snprintf(out, out_size, MSG_FILE_NOT_ENOUGH, file_name);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Rule bulk add */
+ status = softnic_pipeline_table_rule_add_bulk(softnic,
+ pipeline_name,
+ table_id,
+ match,
+ action,
+ data,
+ &n_rules);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Memory free */
+ free(data);
+ free(action);
+ free(match);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule delete
+ * match <match>
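+ *
+ * Example (PIPELINE0 and the match fields are placeholder values):
+ *     pipeline PIPELINE0 table 0 rule delete match lpm ipv4 10.0.0.0 24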
+ */
+static void
+cmd_softnic_pipeline_table_rule_delete(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_match m;
+ char *pipeline_name;
+ uint32_t table_id, n_tokens_parsed, t0;
+ int status;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ t0 = 6;
+
+ /* match */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ if (n_tokens != t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_delete(softnic,
+ pipeline_name,
+ table_id,
+ &m);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule delete
+ * match
+ * default
+ */
+static void
+cmd_softnic_pipeline_table_rule_delete_default(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t table_id;
+ int status;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ if (strcmp(tokens[6], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match");
+ return;
+ }
+
+ if (strcmp(tokens[7], "default") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "default");
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_delete_default(softnic,
+ pipeline_name,
+ table_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule read stats [clear]
+ */
+static void
+cmd_softnic_pipeline_table_rule_stats_read(struct pmd_internals *softnic __rte_unused,
+ char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> meter profile <meter_profile_id>
+ * add srtcm cir <cir> cbs <cbs> ebs <ebs>
+ * | trtcm cir <cir> pir <pir> cbs <cbs> pbs <pbs>
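+ *
+ * Example (all numeric values are placeholders; see rte_meter for the exact
+ * rate and burst size units):
+ *     pipeline PIPELINE0 table 0 meter profile 0 add srtcm cir 46000000 cbs 2048 ebs 2048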
+ */
+static void
+cmd_pipeline_table_meter_profile_add(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_table_action_meter_profile p;
+ char *pipeline_name;
+ uint32_t table_id, meter_profile_id;
+ int status;
+
+ if (n_tokens < 9) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "meter") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "meter");
+ return;
+ }
+
+ if (strcmp(tokens[5], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&meter_profile_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "meter_profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[7], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[8], "srtcm") == 0) {
+ if (n_tokens != 15) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return;
+ }
+
+ p.alg = RTE_TABLE_ACTION_METER_SRTCM;
+
+ if (strcmp(tokens[9], "cir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cir");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.srtcm.cir, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cir");
+ return;
+ }
+
+ if (strcmp(tokens[11], "cbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cbs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.srtcm.cbs, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cbs");
+ return;
+ }
+
+ if (strcmp(tokens[13], "ebs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "ebs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.srtcm.ebs, tokens[14]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ebs");
+ return;
+ }
+ } else if (strcmp(tokens[8], "trtcm") == 0) {
+ if (n_tokens != 17) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ p.alg = RTE_TABLE_ACTION_METER_TRTCM;
+
+ if (strcmp(tokens[9], "cir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cir");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.cir, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cir");
+ return;
+ }
+
+ if (strcmp(tokens[11], "pir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pir");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.pir, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pir");
+ return;
+ }
+ if (strcmp(tokens[13], "cbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cbs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.cbs, tokens[14]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cbs");
+ return;
+ }
+
+ if (strcmp(tokens[15], "pbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pbs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.pbs, tokens[16]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pbs");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_mtr_profile_add(softnic,
+ pipeline_name,
+ table_id,
+ meter_profile_id,
+ &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id>
+ * meter profile <meter_profile_id> delete
+ */
+static void
+cmd_pipeline_table_meter_profile_delete(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t table_id, meter_profile_id;
+ int status;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "meter") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "meter");
+ return;
+ }
+
+ if (strcmp(tokens[5], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&meter_profile_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "meter_profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[7], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ status = softnic_pipeline_table_mtr_profile_delete(softnic,
+ pipeline_name,
+ table_id,
+ meter_profile_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule read meter [clear]
+ */
+static void
+cmd_pipeline_table_rule_meter_read(struct pmd_internals *softnic __rte_unused,
+ char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> dscp <file_name>
+ *
+ * File <file_name>:
+ * - exactly 64 lines
+ * - line format: <tc_id> <tc_queue_id> <color>, with <color> as: g | y | r
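+ *
+ * Example lines (tc_id, tc_queue_id and color are placeholder values; the
+ * file must hold one such line for each of the 64 DSCP values):
+ *     0 0 g
+ *     1 0 y
+ *     2 1 r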
+ */
+static int
+load_dscp_table(struct rte_table_action_dscp_table *dscp_table,
+ const char *file_name,
+ uint32_t *line_number)
+{
+ FILE *f = NULL;
+ uint32_t dscp, l;
+
+ /* Check input arguments */
+ if (dscp_table == NULL ||
+ file_name == NULL ||
+ line_number == NULL) {
+ if (line_number)
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Open input file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Read file */
+ for (dscp = 0, l = 1; ; l++) {
+ char line[64];
+ char *tokens[3];
+ enum rte_meter_color color;
+ uint32_t tc_id, tc_queue_id, n_tokens = RTE_DIM(tokens);
+
+ if (fgets(line, sizeof(line), f) == NULL)
+ break;
+
+ if (is_comment(line))
+ continue;
+
+ if (softnic_parse_tokenize_string(line, tokens, &n_tokens)) {
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ if (n_tokens == 0)
+ continue;
+
+ if (dscp >= RTE_DIM(dscp_table->entry) ||
+ n_tokens != RTE_DIM(tokens) ||
+ softnic_parser_read_uint32(&tc_id, tokens[0]) ||
+ tc_id >= RTE_TABLE_ACTION_TC_MAX ||
+ softnic_parser_read_uint32(&tc_queue_id, tokens[1]) ||
+ tc_queue_id >= RTE_TABLE_ACTION_TC_QUEUE_MAX ||
+ (strlen(tokens[2]) != 1)) {
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ switch (tokens[2][0]) {
+ case 'g':
+ case 'G':
+ color = e_RTE_METER_GREEN;
+ break;
+
+ case 'y':
+ case 'Y':
+ color = e_RTE_METER_YELLOW;
+ break;
+
+ case 'r':
+ case 'R':
+ color = e_RTE_METER_RED;
+ break;
+
+ default:
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ dscp_table->entry[dscp].tc_id = tc_id;
+ dscp_table->entry[dscp].tc_queue_id = tc_queue_id;
+ dscp_table->entry[dscp].color = color;
+ dscp++;
+ }
+
+ /* Close file */
+ fclose(f);
+ return 0;
+}
+
+static void
+cmd_pipeline_table_dscp(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_table_action_dscp_table dscp_table;
+ char *pipeline_name, *file_name;
+ uint32_t table_id, line_number;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "dscp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dscp");
+ return;
+ }
+
+ file_name = tokens[5];
+
+ status = load_dscp_table(&dscp_table, file_name, &line_number);
+ if (status) {
+ snprintf(out, out_size, MSG_FILE_ERR, file_name, line_number);
+ return;
+ }
+
+ status = softnic_pipeline_table_dscp_table_update(softnic,
+ pipeline_name,
+ table_id,
+ UINT64_MAX,
+ &dscp_table);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule read ttl [clear]
+ */
+static void
+cmd_softnic_pipeline_table_rule_ttl_read(struct pmd_internals *softnic __rte_unused,
+ char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+/**
+ * thread <thread_id> pipeline <pipeline_name> enable
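+ *
+ * Example (thread ID and pipeline name are placeholder values); the disable
+ * command below follows the same layout:
+ *     thread 1 pipeline PIPELINE0 enable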
+ */
+static void
+cmd_softnic_thread_pipeline_enable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t thread_id;
+ int status;
+
+ if (n_tokens != 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&thread_id, tokens[1]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+ return;
+ }
+
+ if (strcmp(tokens[2], "pipeline") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+ return;
+ }
+
+ pipeline_name = tokens[3];
+
+ if (strcmp(tokens[4], "enable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable");
+ return;
+ }
+
+ status = softnic_thread_pipeline_enable(softnic, thread_id, pipeline_name);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, "thread pipeline enable");
+ return;
+ }
+}
+
+/**
+ * thread <thread_id> pipeline <pipeline_name> disable
+ */
+static void
+cmd_softnic_thread_pipeline_disable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t thread_id;
+ int status;
+
+ if (n_tokens != 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&thread_id, tokens[1]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+ return;
+ }
+
+ if (strcmp(tokens[2], "pipeline") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+ return;
+ }
+
+ pipeline_name = tokens[3];
+
+ if (strcmp(tokens[4], "disable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable");
+ return;
+ }
+
+ status = softnic_thread_pipeline_disable(softnic, thread_id, pipeline_name);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL,
+ "thread pipeline disable");
+ return;
+ }
+}
+
+void
+softnic_cli_process(char *in, char *out, size_t out_size, void *arg)
+{
+ char *tokens[CMD_MAX_TOKENS];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ struct pmd_internals *softnic = arg;
+ int status;
+
+ if (is_comment(in))
+ return;
+
+ status = softnic_parse_tokenize_string(in, tokens, &n_tokens);
+ if (status) {
+ snprintf(out, out_size, MSG_ARG_TOO_MANY, "");
+ return;
+ }
+
+ if (n_tokens == 0)
+ return;
+
+ if (strcmp(tokens[0], "mempool") == 0) {
+ cmd_mempool(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "link") == 0) {
+ cmd_link(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "swq") == 0) {
+ cmd_swq(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "tmgr") == 0) {
+ if (n_tokens == 2) {
+ cmd_tmgr(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[1], "shaper") == 0) &&
+ (strcmp(tokens[2], "profile") == 0)) {
+ cmd_tmgr_shaper_profile(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[1], "shared") == 0) &&
+ (strcmp(tokens[2], "shaper") == 0)) {
+ cmd_tmgr_shared_shaper(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[1], "node") == 0)) {
+ cmd_tmgr_node(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[1], "hierarchy-default") == 0)) {
+ cmd_tmgr_hierarchy_default(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[1], "hierarchy") == 0) &&
+ (strcmp(tokens[2], "commit") == 0)) {
+ cmd_tmgr_hierarchy_commit(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+ }
+
+ if (strcmp(tokens[0], "tap") == 0) {
+ cmd_tap(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "port") == 0) {
+ cmd_port_in_action_profile(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "table") == 0) {
+ cmd_table_action_profile(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "pipeline") == 0) {
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[2], "period") == 0)) {
+ cmd_pipeline(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[4], "bsz") == 0)) {
+ cmd_pipeline_port_in(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "out") == 0) &&
+ (strcmp(tokens[4], "bsz") == 0)) {
+ cmd_pipeline_port_out(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 4 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[3], "match") == 0)) {
+ cmd_pipeline_table(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "table") == 0)) {
+ cmd_pipeline_port_in_table(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "stats") == 0)) {
+ cmd_pipeline_port_in_stats(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "enable") == 0)) {
+ cmd_softnic_pipeline_port_in_enable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "disable") == 0)) {
+ cmd_softnic_pipeline_port_in_disable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "out") == 0) &&
+ (strcmp(tokens[5], "stats") == 0)) {
+ cmd_pipeline_port_out_stats(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "stats") == 0)) {
+ cmd_pipeline_table_stats(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "add") == 0) &&
+ (strcmp(tokens[6], "match") == 0)) {
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[7], "default") == 0)) {
+ cmd_softnic_pipeline_table_rule_add_default(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ cmd_softnic_pipeline_table_rule_add(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "add") == 0) &&
+ (strcmp(tokens[6], "bulk") == 0)) {
+ cmd_softnic_pipeline_table_rule_add_bulk(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "delete") == 0) &&
+ (strcmp(tokens[6], "match") == 0)) {
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[7], "default") == 0)) {
+ cmd_softnic_pipeline_table_rule_delete_default(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ cmd_softnic_pipeline_table_rule_delete(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "stats") == 0)) {
+ cmd_softnic_pipeline_table_rule_stats_read(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "meter") == 0) &&
+ (strcmp(tokens[5], "profile") == 0) &&
+ (strcmp(tokens[7], "add") == 0)) {
+ cmd_pipeline_table_meter_profile_add(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "meter") == 0) &&
+ (strcmp(tokens[5], "profile") == 0) &&
+ (strcmp(tokens[7], "delete") == 0)) {
+ cmd_pipeline_table_meter_profile_delete(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "meter") == 0)) {
+ cmd_pipeline_table_rule_meter_read(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "dscp") == 0)) {
+ cmd_pipeline_table_dscp(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "ttl") == 0)) {
+ cmd_softnic_pipeline_table_rule_ttl_read(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+ }
+
+ if (strcmp(tokens[0], "thread") == 0) {
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[4], "enable") == 0)) {
+ cmd_softnic_thread_pipeline_enable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[4], "disable") == 0)) {
+ cmd_softnic_thread_pipeline_disable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+ }
+
+ snprintf(out, out_size, MSG_CMD_UNKNOWN, tokens[0]);
+}
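
A minimal usage sketch for the dispatcher above (not part of this patch), assuming a valid `struct pmd_internals *softnic` is in scope; the command string and the output buffer size are illustrative only:

	/* Feed one mutable command line through the token dispatcher. */
	char cmd[] = "pipeline PIPELINE0 period 10 offset_port_id 0";
	char out[2048];

	out[0] = 0;
	softnic_cli_process(cmd, out, sizeof(out), softnic);
	if (strlen(out))
		printf("%s", out); /* Parse/exec error message, if any. */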
+
+int
+softnic_cli_script_process(struct pmd_internals *softnic,
+ const char *file_name,
+ size_t msg_in_len_max,
+ size_t msg_out_len_max)
+{
+ char *msg_in = NULL, *msg_out = NULL;
+ FILE *f = NULL;
+
+ /* Check input arguments */
+ if (file_name == NULL ||
+ (strlen(file_name) == 0) ||
+ msg_in_len_max == 0 ||
+ msg_out_len_max == 0)
+ return -EINVAL;
+
+ msg_in = malloc(msg_in_len_max + 1);
+ msg_out = malloc(msg_out_len_max + 1);
+ if (msg_in == NULL ||
+ msg_out == NULL) {
+ free(msg_out);
+ free(msg_in);
+ return -ENOMEM;
+ }
+
+ /* Open input file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ free(msg_out);
+ free(msg_in);
+ return -EIO;
+ }
+
+ /* Read file */
+ for ( ; ; ) {
+ if (fgets(msg_in, msg_in_len_max + 1, f) == NULL)
+ break;
+
+ printf("%s", msg_in);
+ msg_out[0] = 0;
+
+ softnic_cli_process(msg_in,
+ msg_out,
+ msg_out_len_max,
+ softnic);
+
+ if (strlen(msg_out))
+ printf("%s", msg_out);
+ }
+
+ /* Close file */
+ fclose(f);
+ free(msg_out);
+ free(msg_in);
+ return 0;
+}
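
A hedged invocation sketch for the script processor above; the file path and the two length limits are placeholders:

	/* Replay a CLI script, e.g. the "firmware" file given at probe time. */
	int status = softnic_cli_script_process(softnic,
		"/tmp/softnic.cli", /* placeholder path */
		1024,               /* max input line length */
		4096);              /* max output (error) length */
	if (status)
		printf("script failed: %d\n", status);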
+
+static int
+cli_rule_file_process(const char *file_name,
+ size_t line_len_max,
+ struct softnic_table_rule_match *m,
+ struct softnic_table_rule_action *a,
+ uint32_t *n_rules,
+ uint32_t *line_number,
+ char *out,
+ size_t out_size)
+{
+ FILE *f = NULL;
+ char *line = NULL;
+ uint32_t rule_id, line_id;
+ int status = 0;
+
+ /* Check input arguments */
+ if (file_name == NULL ||
+ (strlen(file_name) == 0) ||
+ line_len_max == 0) {
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Memory allocation */
+ line = malloc(line_len_max + 1);
+ if (line == NULL) {
+ *line_number = 0;
+ return -ENOMEM;
+ }
+
+ /* Open file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ *line_number = 0;
+ free(line);
+ return -EIO;
+ }
+
+ /* Read file */
+ for (line_id = 1, rule_id = 0; rule_id < *n_rules; line_id++) {
+ char *tokens[CMD_MAX_TOKENS];
+ uint32_t n_tokens, n_tokens_parsed, t0;
+
+ /* Read next line from file. */
+ if (fgets(line, line_len_max + 1, f) == NULL)
+ break;
+
+ /* Comment. */
+ if (is_comment(line))
+ continue;
+
+ /* Parse line. */
+ n_tokens = RTE_DIM(tokens);
+ status = softnic_parse_tokenize_string(line, tokens, &n_tokens);
+ if (status) {
+ status = -EINVAL;
+ break;
+ }
+
+ /* Empty line. */
+ if (n_tokens == 0)
+ continue;
+ t0 = 0;
+
+ /* Rule match. */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m[rule_id]);
+ if (n_tokens_parsed == 0) {
+ status = -EINVAL;
+ break;
+ }
+ t0 += n_tokens_parsed;
+
+ /* Rule action. */
+ n_tokens_parsed = parse_table_action(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &a[rule_id]);
+ if (n_tokens_parsed == 0) {
+ status = -EINVAL;
+ break;
+ }
+ t0 += n_tokens_parsed;
+
+ /* Line completed. */
+ if (t0 < n_tokens) {
+ status = -EINVAL;
+ break;
+ }
+
+ /* Increment rule count */
+ rule_id++;
+ }
+
+ /* Close file */
+ fclose(f);
+
+ /* Memory free */
+ free(line);
+
+ *n_rules = rule_id;
+ *line_number = line_id;
+ return status;
+}
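
Each non-comment line of the rule file is therefore one match clause immediately followed by one action clause, with no trailing tokens allowed. One plausible line, assuming the grammar implemented by parse_match() and parse_table_action() elsewhere in this file, and '#'-style comments per is_comment():

	# one rule per line: match <...> action <...>
	match lpm ipv4 10.0.0.0 24 action fwd port 0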
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index 050e3e7e..a25eb874 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -5,79 +5,97 @@
#ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
#define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
+#include <stddef.h>
#include <stdint.h>
+#include <sys/queue.h>
+#include <rte_mempool.h>
#include <rte_mbuf.h>
+#include <rte_ring.h>
+#include <rte_ethdev.h>
#include <rte_sched.h>
+#include <rte_port_in_action.h>
+#include <rte_table_action.h>
+#include <rte_pipeline.h>
+
#include <rte_ethdev_driver.h>
#include <rte_tm_driver.h>
#include "rte_eth_softnic.h"
+#include "conn.h"
+
+#define NAME_SIZE 64
/**
* PMD Parameters
*/
-enum pmd_feature {
- PMD_FEATURE_TM = 1, /**< Traffic Management (TM) */
-};
-
-#ifndef INTRUSIVE
-#define INTRUSIVE 0
-#endif
-
struct pmd_params {
- /** Parameters for the soft device (to be created) */
- struct {
- const char *name; /**< Name */
- uint32_t flags; /**< Flags */
+ const char *name;
+ const char *firmware;
+ uint16_t conn_port;
+ uint32_t cpu_id;
- /** 0 = Access hard device though API only (potentially slower,
- * but safer);
- * 1 = Access hard device private data structures is allowed
- * (potentially faster).
- */
- int intrusive;
+ /** Traffic Management (TM) */
+ struct {
+ uint32_t n_queues; /**< Number of queues */
+ uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ } tm;
+};
- /** Traffic Management (TM) */
- struct {
- uint32_t rate; /**< Rate (bytes/second) */
- uint32_t nb_queues; /**< Number of queues */
- uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- /**< Queue size per traffic class */
- uint32_t enq_bsz; /**< Enqueue burst size */
- uint32_t deq_bsz; /**< Dequeue burst size */
- } tm;
- } soft;
+/**
+ * MEMPOOL
+ */
+struct softnic_mempool_params {
+ uint32_t buffer_size;
+ uint32_t pool_size;
+ uint32_t cache_size;
+};
- /** Parameters for the hard device (existing) */
- struct {
- char *name; /**< Name */
- uint16_t tx_queue_id; /**< TX queue ID */
- } hard;
+struct softnic_mempool {
+ TAILQ_ENTRY(softnic_mempool) node;
+ char name[NAME_SIZE];
+ struct rte_mempool *m;
+ uint32_t buffer_size;
};
+TAILQ_HEAD(softnic_mempool_list, softnic_mempool);
+
/**
- * Default Internals
+ * SWQ
*/
+struct softnic_swq_params {
+ uint32_t size;
+};
-#ifndef DEFAULT_BURST_SIZE
-#define DEFAULT_BURST_SIZE 32
-#endif
+struct softnic_swq {
+ TAILQ_ENTRY(softnic_swq) node;
+ char name[NAME_SIZE];
+ struct rte_ring *r;
+};
-#ifndef FLUSH_COUNT_THRESHOLD
-#define FLUSH_COUNT_THRESHOLD (1 << 17)
-#endif
+TAILQ_HEAD(softnic_swq_list, softnic_swq);
+
+/**
+ * LINK
+ */
+struct softnic_link_params {
+ const char *dev_name;
+ uint16_t port_id; /**< Valid only when *dev_name* is NULL. */
+};
-struct default_internals {
- struct rte_mbuf **pkts;
- uint32_t pkts_len;
- uint32_t txq_pos;
- uint32_t flush_count;
+struct softnic_link {
+ TAILQ_ENTRY(softnic_link) node;
+ char name[NAME_SIZE];
+ uint16_t port_id;
+ uint32_t n_rxq;
+ uint32_t n_txq;
};
+TAILQ_HEAD(softnic_link_list, softnic_link);
+
/**
- * Traffic Management (TM) Internals
+ * TMGR
*/
#ifndef TM_MAX_SUBPORTS
@@ -185,16 +203,281 @@ struct tm_internals {
/** Blueprints */
struct tm_params params;
+};
+
+struct softnic_tmgr_port {
+ TAILQ_ENTRY(softnic_tmgr_port) node;
+ char name[NAME_SIZE];
+ struct rte_sched_port *s;
+};
+
+TAILQ_HEAD(softnic_tmgr_port_list, softnic_tmgr_port);
+
+/**
+ * TAP
+ */
+struct softnic_tap {
+ TAILQ_ENTRY(softnic_tap) node;
+ char name[NAME_SIZE];
+ int fd;
+};
+
+TAILQ_HEAD(softnic_tap_list, softnic_tap);
+
+/**
+ * Input port action
+ */
+struct softnic_port_in_action_profile_params {
+ uint64_t action_mask;
+ struct rte_port_in_action_fltr_config fltr;
+ struct rte_port_in_action_lb_config lb;
+};
+
+struct softnic_port_in_action_profile {
+ TAILQ_ENTRY(softnic_port_in_action_profile) node;
+ char name[NAME_SIZE];
+ struct softnic_port_in_action_profile_params params;
+ struct rte_port_in_action_profile *ap;
+};
+
+TAILQ_HEAD(softnic_port_in_action_profile_list, softnic_port_in_action_profile);
+
+/**
+ * Table action
+ */
+struct softnic_table_action_profile_params {
+ uint64_t action_mask;
+ struct rte_table_action_common_config common;
+ struct rte_table_action_lb_config lb;
+ struct rte_table_action_mtr_config mtr;
+ struct rte_table_action_tm_config tm;
+ struct rte_table_action_encap_config encap;
+ struct rte_table_action_nat_config nat;
+ struct rte_table_action_ttl_config ttl;
+ struct rte_table_action_stats_config stats;
+};
+
+struct softnic_table_action_profile {
+ TAILQ_ENTRY(softnic_table_action_profile) node;
+ char name[NAME_SIZE];
+ struct softnic_table_action_profile_params params;
+ struct rte_table_action_profile *ap;
+};
+
+TAILQ_HEAD(softnic_table_action_profile_list, softnic_table_action_profile);
+
+/**
+ * Pipeline
+ */
+struct pipeline_params {
+ uint32_t timer_period_ms;
+ uint32_t offset_port_id;
+};
+
+enum softnic_port_in_type {
+ PORT_IN_RXQ,
+ PORT_IN_SWQ,
+ PORT_IN_TMGR,
+ PORT_IN_TAP,
+ PORT_IN_SOURCE,
+};
+
+struct softnic_port_in_params {
+ /* Read */
+ enum softnic_port_in_type type;
+ const char *dev_name;
+ union {
+ struct {
+ uint16_t queue_id;
+ } rxq;
+
+ struct {
+ const char *mempool_name;
+ uint32_t mtu;
+ } tap;
+
+ struct {
+ const char *mempool_name;
+ const char *file_name;
+ uint32_t n_bytes_per_pkt;
+ } source;
+ };
+ uint32_t burst_size;
+
+ /* Action */
+ const char *action_profile_name;
+};
+
+enum softnic_port_out_type {
+ PORT_OUT_TXQ,
+ PORT_OUT_SWQ,
+ PORT_OUT_TMGR,
+ PORT_OUT_TAP,
+ PORT_OUT_SINK,
+};
+
+struct softnic_port_out_params {
+ enum softnic_port_out_type type;
+ const char *dev_name;
+ union {
+ struct {
+ uint16_t queue_id;
+ } txq;
- /** Run-time */
- struct rte_sched_port *sched;
- struct rte_mbuf **pkts_enq;
- struct rte_mbuf **pkts_deq;
- uint32_t pkts_enq_len;
- uint32_t txq_pos;
- uint32_t flush_count;
+ struct {
+ const char *file_name;
+ uint32_t max_n_pkts;
+ } sink;
+ };
+ uint32_t burst_size;
+ int retry;
+ uint32_t n_retries;
+};
+
+enum softnic_table_type {
+ TABLE_ACL,
+ TABLE_ARRAY,
+ TABLE_HASH,
+ TABLE_LPM,
+ TABLE_STUB,
+};
+
+struct softnic_table_acl_params {
+ uint32_t n_rules;
+ uint32_t ip_header_offset;
+ int ip_version;
+};
+
+struct softnic_table_array_params {
+ uint32_t n_keys;
+ uint32_t key_offset;
+};
+
+struct softnic_table_hash_params {
+ uint32_t n_keys;
+ uint32_t key_offset;
+ uint32_t key_size;
+ uint8_t *key_mask;
+ uint32_t n_buckets;
+ int extendable_bucket;
+};
+
+struct softnic_table_lpm_params {
+ uint32_t n_rules;
+ uint32_t key_offset;
+ uint32_t key_size;
+};
+
+struct softnic_table_params {
+ /* Match */
+ enum softnic_table_type match_type;
+ union {
+ struct softnic_table_acl_params acl;
+ struct softnic_table_array_params array;
+ struct softnic_table_hash_params hash;
+ struct softnic_table_lpm_params lpm;
+ } match;
+
+ /* Action */
+ const char *action_profile_name;
+};
+
+struct softnic_port_in {
+ struct softnic_port_in_params params;
+ struct softnic_port_in_action_profile *ap;
+ struct rte_port_in_action *a;
+};
+
+struct softnic_table {
+ struct softnic_table_params params;
+ struct softnic_table_action_profile *ap;
+ struct rte_table_action *a;
};
+struct pipeline {
+ TAILQ_ENTRY(pipeline) node;
+ char name[NAME_SIZE];
+
+ struct rte_pipeline *p;
+ struct softnic_port_in port_in[RTE_PIPELINE_PORT_IN_MAX];
+ struct softnic_table table[RTE_PIPELINE_TABLE_MAX];
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ uint32_t n_tables;
+
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint32_t timer_period_ms;
+
+ int enabled;
+ uint32_t thread_id;
+ uint32_t cpu_id;
+};
+
+TAILQ_HEAD(pipeline_list, pipeline);
+
+/**
+ * Thread
+ */
+#ifndef THREAD_PIPELINES_MAX
+#define THREAD_PIPELINES_MAX 256
+#endif
+
+#ifndef THREAD_MSGQ_SIZE
+#define THREAD_MSGQ_SIZE 64
+#endif
+
+#ifndef THREAD_TIMER_PERIOD_MS
+#define THREAD_TIMER_PERIOD_MS 100
+#endif
+
+/**
+ * Master thread: data plane thread context
+ */
+struct softnic_thread {
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+
+ uint32_t enabled;
+};
+
+/**
+ * Data plane threads: context
+ */
+#ifndef TABLE_RULE_ACTION_SIZE_MAX
+#define TABLE_RULE_ACTION_SIZE_MAX 2048
+#endif
+
+struct softnic_table_data {
+ struct rte_table_action *a;
+};
+
+struct pipeline_data {
+ struct rte_pipeline *p;
+ struct softnic_table_data table_data[RTE_PIPELINE_TABLE_MAX];
+ uint32_t n_tables;
+
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint64_t timer_period; /* Measured in CPU cycles. */
+ uint64_t time_next;
+
+ uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
+};
+
+struct softnic_thread_data {
+ struct rte_pipeline *p[THREAD_PIPELINES_MAX];
+ uint32_t n_pipelines;
+
+ struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint64_t timer_period; /* Measured in CPU cycles. */
+ uint64_t time_next;
+ uint64_t time_next_min;
+ uint64_t iter;
+} __rte_cache_aligned;
+
/**
* PMD Internals
*/
@@ -202,61 +485,426 @@ struct pmd_internals {
/** Params */
struct pmd_params params;
- /** Soft device */
struct {
- struct default_internals def; /**< Default */
struct tm_internals tm; /**< Traffic Management */
} soft;
- /** Hard device */
- struct {
- uint16_t port_id;
- } hard;
-};
-
-struct pmd_rx_queue {
- /** Hard device */
- struct {
- uint16_t port_id;
- uint16_t rx_queue_id;
- } hard;
+ struct softnic_conn *conn;
+ struct softnic_mempool_list mempool_list;
+ struct softnic_swq_list swq_list;
+ struct softnic_link_list link_list;
+ struct softnic_tmgr_port_list tmgr_port_list;
+ struct softnic_tap_list tap_list;
+ struct softnic_port_in_action_profile_list port_in_action_profile_list;
+ struct softnic_table_action_profile_list table_action_profile_list;
+ struct pipeline_list pipeline_list;
+ struct softnic_thread thread[RTE_MAX_LCORE];
+ struct softnic_thread_data thread_data[RTE_MAX_LCORE];
};
/**
- * Traffic Management (TM) Operation
+ * MEMPOOL
*/
-extern const struct rte_tm_ops pmd_tm_ops;
+int
+softnic_mempool_init(struct pmd_internals *p);
+
+void
+softnic_mempool_free(struct pmd_internals *p);
+
+struct softnic_mempool *
+softnic_mempool_find(struct pmd_internals *p,
+ const char *name);
+struct softnic_mempool *
+softnic_mempool_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_mempool_params *params);
+
+/**
+ * SWQ
+ */
int
-tm_params_check(struct pmd_params *params, uint32_t hard_rate);
+softnic_swq_init(struct pmd_internals *p);
+
+void
+softnic_swq_free(struct pmd_internals *p);
+
+void
+softnic_softnic_swq_free_keep_rxq_txq(struct pmd_internals *p);
+
+struct softnic_swq *
+softnic_swq_find(struct pmd_internals *p,
+ const char *name);
+struct softnic_swq *
+softnic_swq_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_swq_params *params);
+
+/**
+ * LINK
+ */
int
-tm_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);
+softnic_link_init(struct pmd_internals *p);
void
-tm_free(struct pmd_internals *p);
+softnic_link_free(struct pmd_internals *p);
+
+struct softnic_link *
+softnic_link_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_link *
+softnic_link_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_link_params *params);
+/**
+ * TMGR
+ */
int
-tm_start(struct pmd_internals *p);
+softnic_tmgr_init(struct pmd_internals *p);
void
-tm_stop(struct pmd_internals *p);
+softnic_tmgr_free(struct pmd_internals *p);
-static inline int
-tm_enabled(struct rte_eth_dev *dev)
-{
- struct pmd_internals *p = dev->data->dev_private;
+struct softnic_tmgr_port *
+softnic_tmgr_port_find(struct pmd_internals *p,
+ const char *name);
- return (p->params.soft.flags & PMD_FEATURE_TM);
-}
+struct softnic_tmgr_port *
+softnic_tmgr_port_create(struct pmd_internals *p,
+ const char *name);
+
+void
+tm_hierarchy_init(struct pmd_internals *p);
+
+void
+tm_hierarchy_free(struct pmd_internals *p);
static inline int
tm_used(struct rte_eth_dev *dev)
{
struct pmd_internals *p = dev->data->dev_private;
- return (p->params.soft.flags & PMD_FEATURE_TM) &&
- p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
+ return p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
}
+extern const struct rte_tm_ops pmd_tm_ops;
+
+/**
+ * TAP
+ */
+int
+softnic_tap_init(struct pmd_internals *p);
+
+void
+softnic_tap_free(struct pmd_internals *p);
+
+struct softnic_tap *
+softnic_tap_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_tap *
+softnic_tap_create(struct pmd_internals *p,
+ const char *name);
+
+/**
+ * Input port action
+ */
+int
+softnic_port_in_action_profile_init(struct pmd_internals *p);
+
+void
+softnic_port_in_action_profile_free(struct pmd_internals *p);
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_port_in_action_profile_params *params);
+
+/**
+ * Table action
+ */
+int
+softnic_table_action_profile_init(struct pmd_internals *p);
+
+void
+softnic_table_action_profile_free(struct pmd_internals *p);
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_table_action_profile_params *params);
+
+/**
+ * Pipeline
+ */
+int
+softnic_pipeline_init(struct pmd_internals *p);
+
+void
+softnic_pipeline_free(struct pmd_internals *p);
+
+void
+softnic_pipeline_disable_all(struct pmd_internals *p);
+
+struct pipeline *
+softnic_pipeline_find(struct pmd_internals *p, const char *name);
+
+struct pipeline *
+softnic_pipeline_create(struct pmd_internals *p,
+ const char *name,
+ struct pipeline_params *params);
+
+int
+softnic_pipeline_port_in_create(struct pmd_internals *p,
+ const char *pipeline_name,
+ struct softnic_port_in_params *params,
+ int enabled);
+
+int
+softnic_pipeline_port_in_connect_to_table(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id,
+ uint32_t table_id);
+
+int
+softnic_pipeline_port_out_create(struct pmd_internals *p,
+ const char *pipeline_name,
+ struct softnic_port_out_params *params);
+
+int
+softnic_pipeline_table_create(struct pmd_internals *p,
+ const char *pipeline_name,
+ struct softnic_table_params *params);
+
+struct softnic_table_rule_match_acl {
+ int ip_version;
+
+ RTE_STD_C11
+ union {
+ struct {
+ uint32_t sa;
+ uint32_t da;
+ } ipv4;
+
+ struct {
+ uint8_t sa[16];
+ uint8_t da[16];
+ } ipv6;
+ };
+
+ uint32_t sa_depth;
+ uint32_t da_depth;
+ uint16_t sp0;
+ uint16_t sp1;
+ uint16_t dp0;
+ uint16_t dp1;
+ uint8_t proto;
+ uint8_t proto_mask;
+ uint32_t priority;
+};
+
+struct softnic_table_rule_match_array {
+ uint32_t pos;
+};
+
+#ifndef TABLE_RULE_MATCH_SIZE_MAX
+#define TABLE_RULE_MATCH_SIZE_MAX 256
+#endif
+
+struct softnic_table_rule_match_hash {
+ uint8_t key[TABLE_RULE_MATCH_SIZE_MAX];
+};
+
+struct softnic_table_rule_match_lpm {
+ int ip_version;
+
+ RTE_STD_C11
+ union {
+ uint32_t ipv4;
+ uint8_t ipv6[16];
+ };
+
+ uint8_t depth;
+};
+
+struct softnic_table_rule_match {
+ enum softnic_table_type match_type;
+
+ union {
+ struct softnic_table_rule_match_acl acl;
+ struct softnic_table_rule_match_array array;
+ struct softnic_table_rule_match_hash hash;
+ struct softnic_table_rule_match_lpm lpm;
+ } match;
+};
+
+struct softnic_table_rule_action {
+ uint64_t action_mask;
+ struct rte_table_action_fwd_params fwd;
+ struct rte_table_action_lb_params lb;
+ struct rte_table_action_mtr_params mtr;
+ struct rte_table_action_tm_params tm;
+ struct rte_table_action_encap_params encap;
+ struct rte_table_action_nat_params nat;
+ struct rte_table_action_ttl_params ttl;
+ struct rte_table_action_stats_params stats;
+ struct rte_table_action_time_params time;
+};
+
+int
+softnic_pipeline_port_in_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats,
+ int clear);
+
+int
+softnic_pipeline_port_in_enable(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id);
+
+int
+softnic_pipeline_port_in_disable(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id);
+
+int
+softnic_pipeline_port_out_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats,
+ int clear);
+
+int
+softnic_pipeline_table_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats,
+ int clear);
+
+int
+softnic_pipeline_table_rule_add(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data);
+
+int
+softnic_pipeline_table_rule_add_bulk(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data,
+ uint32_t *n_rules);
+
+int
+softnic_pipeline_table_rule_add_default(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_action *action,
+ void **data);
+
+int
+softnic_pipeline_table_rule_delete(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match);
+
+int
+softnic_pipeline_table_rule_delete_default(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id);
+
+int
+softnic_pipeline_table_rule_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear);
+
+int
+softnic_pipeline_table_mtr_profile_add(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile);
+
+int
+softnic_pipeline_table_mtr_profile_delete(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id);
+
+int
+softnic_pipeline_table_rule_mtr_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear);
+
+int
+softnic_pipeline_table_dscp_table_update(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *dscp_table);
+
+int
+softnic_pipeline_table_rule_ttl_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear);
+
+/**
+ * Thread
+ */
+int
+softnic_thread_init(struct pmd_internals *p);
+
+void
+softnic_thread_free(struct pmd_internals *p);
+
+int
+softnic_thread_pipeline_enable(struct pmd_internals *p,
+ uint32_t thread_id,
+ const char *pipeline_name);
+
+int
+softnic_thread_pipeline_disable(struct pmd_internals *p,
+ uint32_t thread_id,
+ const char *pipeline_name);
+
+/**
+ * CLI
+ */
+void
+softnic_cli_process(char *in,
+ char *out,
+ size_t out_size,
+ void *arg);
+
+int
+softnic_cli_script_process(struct pmd_internals *softnic,
+ const char *file_name,
+ size_t msg_in_len_max,
+ size_t msg_out_len_max);
+
#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */
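
The header above applies one recurring pattern to every object type (mempool, swq, link, tmgr, tap, action profiles, pipeline). A generic sketch of that pattern, using a hypothetical object `foo`:

	/* Hypothetical object "foo", following the header's recurring pattern. */
	struct softnic_foo {
		TAILQ_ENTRY(softnic_foo) node; /* linkage in the per-device list */
		char name[NAME_SIZE];          /* lookup key */
		/* ... object-specific resources ... */
	};

	TAILQ_HEAD(softnic_foo_list, softnic_foo);

	/* The list head lives in struct pmd_internals; the API is always:
	 *   softnic_foo_init()   - TAILQ_INIT() the list
	 *   softnic_foo_free()   - pop each node, release resources, free()
	 *   softnic_foo_find()   - linear scan, strcmp() on name
	 *   softnic_foo_create() - validate params, create the DPDK resource,
	 *                          calloc() the node, TAILQ_INSERT_TAIL()
	 */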
diff --git a/drivers/net/softnic/rte_eth_softnic_link.c b/drivers/net/softnic/rte_eth_softnic_link.c
new file mode 100644
index 00000000..d669913a
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_link.c
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+
+int
+softnic_link_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->link_list);
+
+ return 0;
+}
+
+void
+softnic_link_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_link *link;
+
+ link = TAILQ_FIRST(&p->link_list);
+ if (link == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->link_list, link, node);
+ free(link);
+ }
+}
+
+struct softnic_link *
+softnic_link_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_link *link;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(link, &p->link_list, node)
+ if (strcmp(link->name, name) == 0)
+ return link;
+
+ return NULL;
+}
+
+struct softnic_link *
+softnic_link_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_link_params *params)
+{
+ struct rte_eth_dev_info port_info;
+ struct softnic_link *link;
+ uint16_t port_id;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_link_find(p, name) ||
+ params == NULL)
+ return NULL;
+
+ port_id = params->port_id;
+ if (params->dev_name) {
+ int status;
+
+ status = rte_eth_dev_get_port_by_name(params->dev_name,
+ &port_id);
+
+ if (status)
+ return NULL;
+ } else {
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return NULL;
+ }
+
+ rte_eth_dev_info_get(port_id, &port_info);
+
+ /* Node allocation */
+ link = calloc(1, sizeof(struct softnic_link));
+ if (link == NULL)
+ return NULL;
+
+ /* Node fill in */
+ strlcpy(link->name, name, sizeof(link->name));
+ link->port_id = port_id;
+ link->n_rxq = port_info.nb_rx_queues;
+ link->n_txq = port_info.nb_tx_queues;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->link_list, link, node);
+
+ return link;
+}
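
A usage sketch for the function above; the device name is a placeholder:

	/* Wrap an existing ethdev port as a softnic LINK object. */
	struct softnic_link_params lp = {
		.dev_name = "0000:05:00.0", /* placeholder; may be NULL */
		.port_id = 0,               /* used only when dev_name is NULL */
	};
	struct softnic_link *link = softnic_link_create(p, "LINK0", &lp);
	if (link == NULL)
		printf("LINK0: duplicate name or unknown device\n");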
diff --git a/drivers/net/softnic/rte_eth_softnic_mempool.c b/drivers/net/softnic/rte_eth_softnic_mempool.c
new file mode 100644
index 00000000..d5c569f9
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_mempool.c
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+
+#define BUFFER_SIZE_MIN (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+
+int
+softnic_mempool_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->mempool_list);
+
+ return 0;
+}
+
+void
+softnic_mempool_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_mempool *mempool;
+
+ mempool = TAILQ_FIRST(&p->mempool_list);
+ if (mempool == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->mempool_list, mempool, node);
+ rte_mempool_free(mempool->m);
+ free(mempool);
+ }
+}
+
+struct softnic_mempool *
+softnic_mempool_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_mempool *mempool;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(mempool, &p->mempool_list, node)
+ if (strcmp(mempool->name, name) == 0)
+ return mempool;
+
+ return NULL;
+}
+
+struct softnic_mempool *
+softnic_mempool_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_mempool_params *params)
+{
+ char mempool_name[NAME_SIZE];
+ struct softnic_mempool *mempool;
+ struct rte_mempool *m;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_mempool_find(p, name) ||
+ params == NULL ||
+ params->buffer_size < BUFFER_SIZE_MIN ||
+ params->pool_size == 0)
+ return NULL;
+
+ /* Resource create */
+ snprintf(mempool_name, sizeof(mempool_name), "%s_%s",
+ p->params.name,
+ name);
+
+ m = rte_pktmbuf_pool_create(mempool_name,
+ params->pool_size,
+ params->cache_size,
+ 0,
+ params->buffer_size - sizeof(struct rte_mbuf),
+ p->params.cpu_id);
+
+ if (m == NULL)
+ return NULL;
+
+ /* Node allocation */
+ mempool = calloc(1, sizeof(struct softnic_mempool));
+ if (mempool == NULL) {
+ rte_mempool_free(m);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(mempool->name, name, sizeof(mempool->name));
+ mempool->m = m;
+ mempool->buffer_size = params->buffer_size;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->mempool_list, mempool, node);
+
+ return mempool;
+}
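
Note that buffer_size covers the whole mbuf: the data room passed to rte_pktmbuf_pool_create() is buffer_size minus sizeof(struct rte_mbuf), and BUFFER_SIZE_MIN guarantees it at least covers the headroom. A usage sketch, assuming the typical sizeof(struct rte_mbuf) == 128 and RTE_PKTMBUF_HEADROOM == 128 (both build-dependent):

	struct softnic_mempool_params mp = {
		.buffer_size = 2304,    /* 128 mbuf + 128 headroom + 2048 payload */
		.pool_size = 32 * 1024,
		.cache_size = 256,
	};
	struct softnic_mempool *mpool = softnic_mempool_create(p, "MEMPOOL0", &mp);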
diff --git a/drivers/net/softnic/rte_eth_softnic_pipeline.c b/drivers/net/softnic/rte_eth_softnic_pipeline.c
new file mode 100644
index 00000000..45136a4a
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_pipeline.c
@@ -0,0 +1,966 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+
+#include <rte_string_fns.h>
+#include <rte_port_ethdev.h>
+#include <rte_port_ring.h>
+#include <rte_port_source_sink.h>
+#include <rte_port_fd.h>
+#include <rte_port_sched.h>
+
+#include <rte_table_acl.h>
+#include <rte_table_array.h>
+#include <rte_table_hash.h>
+#include <rte_table_lpm.h>
+#include <rte_table_lpm_ipv6.h>
+#include <rte_table_stub.h>
+
+#include "rte_eth_softnic_internals.h"
+
+#include "hash_func.h"
+
+#ifndef PIPELINE_MSGQ_SIZE
+#define PIPELINE_MSGQ_SIZE 64
+#endif
+
+#ifndef TABLE_LPM_NUMBER_TBL8
+#define TABLE_LPM_NUMBER_TBL8 256
+#endif
+
+int
+softnic_pipeline_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->pipeline_list);
+
+ return 0;
+}
+
+void
+softnic_pipeline_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct pipeline *pipeline;
+
+ pipeline = TAILQ_FIRST(&p->pipeline_list);
+ if (pipeline == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->pipeline_list, pipeline, node);
+ rte_ring_free(pipeline->msgq_req);
+ rte_ring_free(pipeline->msgq_rsp);
+ rte_pipeline_free(pipeline->p);
+ free(pipeline);
+ }
+}
+
+void
+softnic_pipeline_disable_all(struct pmd_internals *p)
+{
+ struct pipeline *pipeline;
+
+ TAILQ_FOREACH(pipeline, &p->pipeline_list, node)
+ if (pipeline->enabled)
+ softnic_thread_pipeline_disable(p,
+ pipeline->thread_id,
+ pipeline->name);
+}
+
+struct pipeline *
+softnic_pipeline_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct pipeline *pipeline;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(pipeline, &p->pipeline_list, node)
+ if (strcmp(name, pipeline->name) == 0)
+ return pipeline;
+
+ return NULL;
+}
+
+struct pipeline *
+softnic_pipeline_create(struct pmd_internals *softnic,
+ const char *name,
+ struct pipeline_params *params)
+{
+ char resource_name[NAME_MAX];
+ struct rte_pipeline_params pp;
+ struct pipeline *pipeline;
+ struct rte_pipeline *p;
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_pipeline_find(softnic, name) ||
+ params == NULL ||
+ params->timer_period_ms == 0)
+ return NULL;
+
+ /* Resource create */
+ snprintf(resource_name, sizeof(resource_name), "%s-%s-REQ",
+ softnic->params.name,
+ name);
+
+ msgq_req = rte_ring_create(resource_name,
+ PIPELINE_MSGQ_SIZE,
+ softnic->params.cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (msgq_req == NULL)
+ return NULL;
+
+ snprintf(resource_name, sizeof(resource_name), "%s-%s-RSP",
+ softnic->params.name,
+ name);
+
+ msgq_rsp = rte_ring_create(resource_name,
+ PIPELINE_MSGQ_SIZE,
+ softnic->params.cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (msgq_rsp == NULL) {
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ snprintf(resource_name, sizeof(resource_name), "%s_%s",
+ softnic->params.name,
+ name);
+
+ pp.name = resource_name;
+ pp.socket_id = (int)softnic->params.cpu_id;
+ pp.offset_port_id = params->offset_port_id;
+
+ p = rte_pipeline_create(&pp);
+ if (p == NULL) {
+ rte_ring_free(msgq_rsp);
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ /* Node allocation */
+ pipeline = calloc(1, sizeof(struct pipeline));
+ if (pipeline == NULL) {
+ rte_pipeline_free(p);
+ rte_ring_free(msgq_rsp);
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(pipeline->name, name, sizeof(pipeline->name));
+ pipeline->p = p;
+ pipeline->n_ports_in = 0;
+ pipeline->n_ports_out = 0;
+ pipeline->n_tables = 0;
+ pipeline->msgq_req = msgq_req;
+ pipeline->msgq_rsp = msgq_rsp;
+ pipeline->timer_period_ms = params->timer_period_ms;
+ pipeline->enabled = 0;
+ pipeline->cpu_id = softnic->params.cpu_id;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&softnic->pipeline_list, pipeline, node);
+
+ return pipeline;
+}
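
A creation sketch for the function above; the period value is illustrative:

	/* One pipeline; its data plane thread polls the two MSGQs created
	 * here and services the timer every timer_period_ms milliseconds. */
	struct pipeline_params pparams = {
		.timer_period_ms = 10,
		.offset_port_id = 0, /* forwarded to struct rte_pipeline_params */
	};
	struct pipeline *pl = softnic_pipeline_create(softnic, "PIPELINE0", &pparams);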
+
+int
+softnic_pipeline_port_in_create(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ struct softnic_port_in_params *params,
+ int enabled)
+{
+ struct rte_pipeline_port_in_params p;
+
+ union {
+ struct rte_port_ethdev_reader_params ethdev;
+ struct rte_port_ring_reader_params ring;
+ struct rte_port_sched_reader_params sched;
+ struct rte_port_fd_reader_params fd;
+ struct rte_port_source_params source;
+ } pp;
+
+ struct pipeline *pipeline;
+ struct softnic_port_in *port_in;
+ struct softnic_port_in_action_profile *ap;
+ struct rte_port_in_action *action;
+ uint32_t port_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ params == NULL ||
+ params->burst_size == 0 ||
+ params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL)
+ return -1;
+
+ ap = NULL;
+ if (params->action_profile_name) {
+ ap = softnic_port_in_action_profile_find(softnic,
+ params->action_profile_name);
+ if (ap == NULL)
+ return -1;
+ }
+
+ switch (params->type) {
+ case PORT_IN_RXQ:
+ {
+ struct softnic_link *link;
+
+ link = softnic_link_find(softnic, params->dev_name);
+ if (link == NULL)
+ return -1;
+
+ if (params->rxq.queue_id >= link->n_rxq)
+ return -1;
+
+ pp.ethdev.port_id = link->port_id;
+ pp.ethdev.queue_id = params->rxq.queue_id;
+
+ p.ops = &rte_port_ethdev_reader_ops;
+ p.arg_create = &pp.ethdev;
+ break;
+ }
+
+ case PORT_IN_SWQ:
+ {
+ struct softnic_swq *swq;
+
+ swq = softnic_swq_find(softnic, params->dev_name);
+ if (swq == NULL)
+ return -1;
+
+ pp.ring.ring = swq->r;
+
+ p.ops = &rte_port_ring_reader_ops;
+ p.arg_create = &pp.ring;
+ break;
+ }
+
+ case PORT_IN_TMGR:
+ {
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = softnic_tmgr_port_find(softnic, params->dev_name);
+ if (tmgr_port == NULL)
+ return -1;
+
+ pp.sched.sched = tmgr_port->s;
+
+ p.ops = &rte_port_sched_reader_ops;
+ p.arg_create = &pp.sched;
+ break;
+ }
+
+ case PORT_IN_TAP:
+ {
+ struct softnic_tap *tap;
+ struct softnic_mempool *mempool;
+
+ tap = softnic_tap_find(softnic, params->dev_name);
+ mempool = softnic_mempool_find(softnic, params->tap.mempool_name);
+ if (tap == NULL || mempool == NULL)
+ return -1;
+
+ pp.fd.fd = tap->fd;
+ pp.fd.mempool = mempool->m;
+ pp.fd.mtu = params->tap.mtu;
+
+ p.ops = &rte_port_fd_reader_ops;
+ p.arg_create = &pp.fd;
+ break;
+ }
+
+ case PORT_IN_SOURCE:
+ {
+ struct softnic_mempool *mempool;
+
+ mempool = softnic_mempool_find(softnic, params->source.mempool_name);
+ if (mempool == NULL)
+ return -1;
+
+ pp.source.mempool = mempool->m;
+ pp.source.file_name = params->source.file_name;
+ pp.source.n_bytes_per_pkt = params->source.n_bytes_per_pkt;
+
+ p.ops = &rte_port_source_ops;
+ p.arg_create = &pp.source;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ p.burst_size = params->burst_size;
+
+ /* Resource create */
+ action = NULL;
+ p.f_action = NULL;
+ p.arg_ah = NULL;
+
+ if (ap) {
+ action = rte_port_in_action_create(ap->ap,
+ softnic->params.cpu_id);
+ if (action == NULL)
+ return -1;
+
+ status = rte_port_in_action_params_get(action,
+ &p);
+ if (status) {
+ rte_port_in_action_free(action);
+ return -1;
+ }
+ }
+
+ status = rte_pipeline_port_in_create(pipeline->p,
+ &p,
+ &port_id);
+ if (status) {
+ rte_port_in_action_free(action);
+ return -1;
+ }
+
+ if (enabled)
+ rte_pipeline_port_in_enable(pipeline->p, port_id);
+
+ /* Pipeline */
+ port_in = &pipeline->port_in[pipeline->n_ports_in];
+ memcpy(&port_in->params, params, sizeof(*params));
+ port_in->ap = ap;
+ port_in->a = action;
+ pipeline->n_ports_in++;
+
+ return 0;
+}
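
A parameter sketch for the PORT_IN_RXQ case above; names and sizes are placeholders:

	/* Read bursts of up to 32 packets from queue 0 of the LINK0 ethdev. */
	struct softnic_port_in_params pin = {
		.type = PORT_IN_RXQ,
		.dev_name = "LINK0",
		.rxq = { .queue_id = 0 },
		.burst_size = 32,
		.action_profile_name = NULL, /* no input port action */
	};
	int rc = softnic_pipeline_port_in_create(softnic, "PIPELINE0", &pin, 1);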
+
+int
+softnic_pipeline_port_in_connect_to_table(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id,
+ uint32_t table_id)
+{
+ struct pipeline *pipeline;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL ||
+ port_id >= pipeline->n_ports_in ||
+ table_id >= pipeline->n_tables)
+ return -1;
+
+ /* Resource */
+ status = rte_pipeline_port_in_connect_to_table(pipeline->p,
+ port_id,
+ table_id);
+
+ return status;
+}
+
+int
+softnic_pipeline_port_out_create(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ struct softnic_port_out_params *params)
+{
+ struct rte_pipeline_port_out_params p;
+
+ union {
+ struct rte_port_ethdev_writer_params ethdev;
+ struct rte_port_ring_writer_params ring;
+ struct rte_port_sched_writer_params sched;
+ struct rte_port_fd_writer_params fd;
+ struct rte_port_sink_params sink;
+ } pp;
+
+ union {
+ struct rte_port_ethdev_writer_nodrop_params ethdev;
+ struct rte_port_ring_writer_nodrop_params ring;
+ struct rte_port_fd_writer_nodrop_params fd;
+ } pp_nodrop;
+
+ struct pipeline *pipeline;
+ uint32_t port_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+ memset(&pp_nodrop, 0, sizeof(pp_nodrop));
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ params == NULL ||
+ params->burst_size == 0 ||
+ params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL)
+ return -1;
+
+ switch (params->type) {
+ case PORT_OUT_TXQ:
+ {
+ struct softnic_link *link;
+
+ link = softnic_link_find(softnic, params->dev_name);
+ if (link == NULL)
+ return -1;
+
+ if (params->txq.queue_id >= link->n_txq)
+ return -1;
+
+ pp.ethdev.port_id = link->port_id;
+ pp.ethdev.queue_id = params->txq.queue_id;
+ pp.ethdev.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.ethdev.port_id = link->port_id;
+ pp_nodrop.ethdev.queue_id = params->txq.queue_id;
+ pp_nodrop.ethdev.tx_burst_sz = params->burst_size;
+ pp_nodrop.ethdev.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_ethdev_writer_ops;
+ p.arg_create = &pp.ethdev;
+ } else {
+ p.ops = &rte_port_ethdev_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.ethdev;
+ }
+ break;
+ }
+
+ case PORT_OUT_SWQ:
+ {
+ struct softnic_swq *swq;
+
+ swq = softnic_swq_find(softnic, params->dev_name);
+ if (swq == NULL)
+ return -1;
+
+ pp.ring.ring = swq->r;
+ pp.ring.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.ring.ring = swq->r;
+ pp_nodrop.ring.tx_burst_sz = params->burst_size;
+ pp_nodrop.ring.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_ring_writer_ops;
+ p.arg_create = &pp.ring;
+ } else {
+ p.ops = &rte_port_ring_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.ring;
+ }
+ break;
+ }
+
+ case PORT_OUT_TMGR:
+ {
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = softnic_tmgr_port_find(softnic, params->dev_name);
+ if (tmgr_port == NULL)
+ return -1;
+
+ pp.sched.sched = tmgr_port->s;
+ pp.sched.tx_burst_sz = params->burst_size;
+
+ p.ops = &rte_port_sched_writer_ops;
+ p.arg_create = &pp.sched;
+ break;
+ }
+
+ case PORT_OUT_TAP:
+ {
+ struct softnic_tap *tap;
+
+ tap = softnic_tap_find(softnic, params->dev_name);
+ if (tap == NULL)
+ return -1;
+
+ pp.fd.fd = tap->fd;
+ pp.fd.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.fd.fd = tap->fd;
+ pp_nodrop.fd.tx_burst_sz = params->burst_size;
+ pp_nodrop.fd.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_fd_writer_ops;
+ p.arg_create = &pp.fd;
+ } else {
+ p.ops = &rte_port_fd_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.fd;
+ }
+ break;
+ }
+
+ case PORT_OUT_SINK:
+ {
+ pp.sink.file_name = params->sink.file_name;
+ pp.sink.max_n_pkts = params->sink.max_n_pkts;
+
+ p.ops = &rte_port_sink_ops;
+ p.arg_create = &pp.sink;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ p.f_action = NULL;
+ p.arg_ah = NULL;
+
+ /* Resource create */
+ status = rte_pipeline_port_out_create(pipeline->p,
+ &p,
+ &port_id);
+
+ if (status)
+ return -1;
+
+ /* Pipeline */
+ pipeline->n_ports_out++;
+
+ return 0;
+}
+
+static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 3,
+ .offset = sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = offsetof(struct ipv6_hdr, proto),
+ },
+
+ /* Source IP address (IPv6) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = offsetof(struct ipv6_hdr, src_addr[0]),
+ },
+
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = offsetof(struct ipv6_hdr, src_addr[4]),
+ },
+
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = offsetof(struct ipv6_hdr, src_addr[8]),
+ },
+
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 4,
+ .input_index = 4,
+ .offset = offsetof(struct ipv6_hdr, src_addr[12]),
+ },
+
+ /* Destination IP address (IPv6) */
+ [5] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 5,
+ .input_index = 5,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[0]),
+ },
+
+ [6] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 6,
+ .input_index = 6,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[4]),
+ },
+
+ [7] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 7,
+ .input_index = 7,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[8]),
+ },
+
+ [8] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 8,
+ .input_index = 8,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[12]),
+ },
+
+ /* Source Port */
+ [9] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 9,
+ .input_index = 9,
+ .offset = sizeof(struct ipv6_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [10] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 10,
+ .input_index = 9,
+ .offset = sizeof(struct ipv6_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+int
+softnic_pipeline_table_create(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ struct softnic_table_params *params)
+{
+ char name[NAME_MAX];
+ struct rte_pipeline_table_params p;
+
+ union {
+ struct rte_table_acl_params acl;
+ struct rte_table_array_params array;
+ struct rte_table_hash_params hash;
+ struct rte_table_lpm_params lpm;
+ struct rte_table_lpm_ipv6_params lpm_ipv6;
+ } pp;
+
+ struct pipeline *pipeline;
+ struct softnic_table *table;
+ struct softnic_table_action_profile *ap;
+ struct rte_table_action *action;
+ uint32_t table_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ params == NULL)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL ||
+ pipeline->n_tables >= RTE_PIPELINE_TABLE_MAX)
+ return -1;
+
+ ap = NULL;
+ if (params->action_profile_name) {
+ ap = softnic_table_action_profile_find(softnic,
+ params->action_profile_name);
+ if (ap == NULL)
+ return -1;
+ }
+
+ snprintf(name, NAME_MAX, "%s_%s_table%u",
+ softnic->params.name, pipeline_name, pipeline->n_tables);
+
+ switch (params->match_type) {
+ case TABLE_ACL:
+ {
+ uint32_t ip_header_offset = params->match.acl.ip_header_offset -
+ (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
+ uint32_t i;
+
+ if (params->match.acl.n_rules == 0)
+ return -1;
+
+ pp.acl.name = name;
+ pp.acl.n_rules = params->match.acl.n_rules;
+ if (params->match.acl.ip_version) {
+ memcpy(&pp.acl.field_format,
+ &table_acl_field_format_ipv4,
+ sizeof(table_acl_field_format_ipv4));
+ pp.acl.n_rule_fields =
+ RTE_DIM(table_acl_field_format_ipv4);
+ } else {
+ memcpy(&pp.acl.field_format,
+ &table_acl_field_format_ipv6,
+ sizeof(table_acl_field_format_ipv6));
+ pp.acl.n_rule_fields =
+ RTE_DIM(table_acl_field_format_ipv6);
+ }
+
+ for (i = 0; i < pp.acl.n_rule_fields; i++)
+ pp.acl.field_format[i].offset += ip_header_offset;
+
+ p.ops = &rte_table_acl_ops;
+ p.arg_create = &pp.acl;
+ break;
+ }
+
+ case TABLE_ARRAY:
+ {
+ if (params->match.array.n_keys == 0)
+ return -1;
+
+ pp.array.n_entries = params->match.array.n_keys;
+ pp.array.offset = params->match.array.key_offset;
+
+ p.ops = &rte_table_array_ops;
+ p.arg_create = &pp.array;
+ break;
+ }
+
+ case TABLE_HASH:
+ {
+ struct rte_table_ops *ops;
+ rte_table_hash_op_hash f_hash;
+
+ if (params->match.hash.n_keys == 0)
+ return -1;
+
+ switch (params->match.hash.key_size) {
+ case 8:
+ f_hash = hash_default_key8;
+ break;
+ case 16:
+ f_hash = hash_default_key16;
+ break;
+ case 24:
+ f_hash = hash_default_key24;
+ break;
+ case 32:
+ f_hash = hash_default_key32;
+ break;
+ case 40:
+ f_hash = hash_default_key40;
+ break;
+ case 48:
+ f_hash = hash_default_key48;
+ break;
+ case 56:
+ f_hash = hash_default_key56;
+ break;
+ case 64:
+ f_hash = hash_default_key64;
+ break;
+ default:
+ return -1;
+ }
+
+ pp.hash.name = name;
+ pp.hash.key_size = params->match.hash.key_size;
+ pp.hash.key_offset = params->match.hash.key_offset;
+ pp.hash.key_mask = params->match.hash.key_mask;
+ pp.hash.n_keys = params->match.hash.n_keys;
+ pp.hash.n_buckets = params->match.hash.n_buckets;
+ pp.hash.f_hash = f_hash;
+ pp.hash.seed = 0;
+
+ if (params->match.hash.extendable_bucket)
+ switch (params->match.hash.key_size) {
+ case 8:
+ ops = &rte_table_hash_key8_ext_ops;
+ break;
+ case 16:
+ ops = &rte_table_hash_key16_ext_ops;
+ break;
+ default:
+ ops = &rte_table_hash_ext_ops;
+ }
+ else
+ switch (params->match.hash.key_size) {
+ case 8:
+ ops = &rte_table_hash_key8_lru_ops;
+ break;
+ case 16:
+ ops = &rte_table_hash_key16_lru_ops;
+ break;
+ default:
+ ops = &rte_table_hash_lru_ops;
+ }
+
+ p.ops = ops;
+ p.arg_create = &pp.hash;
+ break;
+ }
+
+ case TABLE_LPM:
+ {
+ if (params->match.lpm.n_rules == 0)
+ return -1;
+
+ switch (params->match.lpm.key_size) {
+ case 4:
+ {
+ pp.lpm.name = name;
+ pp.lpm.n_rules = params->match.lpm.n_rules;
+ pp.lpm.number_tbl8s = TABLE_LPM_NUMBER_TBL8;
+ pp.lpm.flags = 0;
+ pp.lpm.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ pp.lpm.offset = params->match.lpm.key_offset;
+
+ p.ops = &rte_table_lpm_ops;
+ p.arg_create = &pp.lpm;
+ break;
+ }
+
+ case 16:
+ {
+ pp.lpm_ipv6.name = name;
+ pp.lpm_ipv6.n_rules = params->match.lpm.n_rules;
+ pp.lpm_ipv6.number_tbl8s = TABLE_LPM_NUMBER_TBL8;
+ pp.lpm_ipv6.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ pp.lpm_ipv6.offset = params->match.lpm.key_offset;
+
+ p.ops = &rte_table_lpm_ipv6_ops;
+ p.arg_create = &pp.lpm_ipv6;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ break;
+ }
+
+ case TABLE_STUB:
+ {
+ p.ops = &rte_table_stub_ops;
+ p.arg_create = NULL;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ /* Resource create */
+ action = NULL;
+ p.f_action_hit = NULL;
+ p.f_action_miss = NULL;
+ p.arg_ah = NULL;
+
+ if (ap) {
+ action = rte_table_action_create(ap->ap,
+ softnic->params.cpu_id);
+ if (action == NULL)
+ return -1;
+
+ status = rte_table_action_table_params_get(action,
+ &p);
+ if (status ||
+ ((p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry)) >
+ TABLE_RULE_ACTION_SIZE_MAX)) {
+ rte_table_action_free(action);
+ return -1;
+ }
+ }
+
+ if (params->match_type == TABLE_LPM) {
+ if (params->match.lpm.key_size == 4)
+ pp.lpm.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+
+ if (params->match.lpm.key_size == 16)
+ pp.lpm_ipv6.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ }
+
+ status = rte_pipeline_table_create(pipeline->p,
+ &p,
+ &table_id);
+ if (status) {
+ rte_table_action_free(action);
+ return -1;
+ }
+
+ /* Pipeline */
+ table = &pipeline->table[pipeline->n_tables];
+ memcpy(&table->params, params, sizeof(*params));
+ table->ap = ap;
+ table->a = action;
+ pipeline->n_tables++;
+
+ return 0;
+}
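
A parameter sketch for the TABLE_HASH case above; all sizes and offsets are placeholders:

	/* LRU hash table with 16-byte keys; key_size == 16 selects
	 * hash_default_key16 and rte_table_hash_key16_lru_ops above. */
	struct softnic_table_params tp = {
		.match_type = TABLE_HASH,
		.match.hash = {
			.n_keys = 16 * 1024,
			.key_offset = 278, /* placeholder mbuf offset */
			.key_size = 16,
			.key_mask = NULL,  /* match all key bytes */
			.n_buckets = 8 * 1024,
			.extendable_bucket = 0, /* LRU, not extendable */
		},
		.action_profile_name = "AP0", /* must already exist, or NULL */
	};
	int rc = softnic_pipeline_table_create(softnic, "PIPELINE0", &tp);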
diff --git a/drivers/net/softnic/rte_eth_softnic_swq.c b/drivers/net/softnic/rte_eth_softnic_swq.c
new file mode 100644
index 00000000..2083d0a9
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_swq.c
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_string_fns.h>
+#include <rte_tailq.h>
+
+#include "rte_eth_softnic_internals.h"
+
+int
+softnic_swq_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->swq_list);
+
+ return 0;
+}
+
+void
+softnic_swq_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_swq *swq;
+
+ swq = TAILQ_FIRST(&p->swq_list);
+ if (swq == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->swq_list, swq, node);
+ rte_ring_free(swq->r);
+ free(swq);
+ }
+}
+
+void
+softnic_softnic_swq_free_keep_rxq_txq(struct pmd_internals *p)
+{
+ struct softnic_swq *swq, *tswq;
+
+ TAILQ_FOREACH_SAFE(swq, &p->swq_list, node, tswq) {
+ if ((strncmp(swq->name, "RXQ", strlen("RXQ")) == 0) ||
+ (strncmp(swq->name, "TXQ", strlen("TXQ")) == 0))
+ continue;
+
+ TAILQ_REMOVE(&p->swq_list, swq, node);
+ rte_ring_free(swq->r);
+ free(swq);
+ }
+}
+
+struct softnic_swq *
+softnic_swq_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_swq *swq;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(swq, &p->swq_list, node)
+ if (strcmp(swq->name, name) == 0)
+ return swq;
+
+ return NULL;
+}
+
+struct softnic_swq *
+softnic_swq_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_swq_params *params)
+{
+ char ring_name[NAME_SIZE];
+ struct softnic_swq *swq;
+ struct rte_ring *r;
+ unsigned int flags = RING_F_SP_ENQ | RING_F_SC_DEQ;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_swq_find(p, name) ||
+ params == NULL ||
+ params->size == 0)
+ return NULL;
+
+ /* Resource create */
+ snprintf(ring_name, sizeof(ring_name), "%s_%s",
+ p->params.name,
+ name);
+
+ r = rte_ring_create(ring_name,
+ params->size,
+ p->params.cpu_id,
+ flags);
+
+ if (r == NULL)
+ return NULL;
+
+ /* Node allocation */
+ swq = calloc(1, sizeof(struct softnic_swq));
+ if (swq == NULL) {
+ rte_ring_free(r);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(swq->name, name, sizeof(swq->name));
+ swq->r = r;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->swq_list, swq, node);
+
+ return swq;
+}
diff --git a/drivers/net/softnic/rte_eth_softnic_tap.c b/drivers/net/softnic/rte_eth_softnic_tap.c
new file mode 100644
index 00000000..bcc23a9f
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_tap.c
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <netinet/in.h>
+#ifdef RTE_EXEC_ENV_LINUXAPP
+#include <linux/if.h>
+#include <linux/if_tun.h>
+#endif
+#include <sys/ioctl.h>
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+
+#define TAP_DEV "/dev/net/tun"
+
+int
+softnic_tap_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->tap_list);
+
+ return 0;
+}
+
+void
+softnic_tap_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_tap *tap;
+
+ tap = TAILQ_FIRST(&p->tap_list);
+ if (tap == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->tap_list, tap, node);
+ free(tap);
+ }
+}
+
+struct softnic_tap *
+softnic_tap_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tap *tap;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(tap, &p->tap_list, node)
+ if (strcmp(tap->name, name) == 0)
+ return tap;
+
+ return NULL;
+}
+
+#ifndef RTE_EXEC_ENV_LINUXAPP
+
+struct softnic_tap *
+softnic_tap_create(struct pmd_internals *p __rte_unused,
+ const char *name __rte_unused)
+{
+ return NULL;
+}
+
+#else
+
+struct softnic_tap *
+softnic_tap_create(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tap *tap;
+ struct ifreq ifr;
+ int fd, status;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_tap_find(p, name))
+ return NULL;
+
+ /* Resource create */
+ fd = open(TAP_DEV, O_RDWR | O_NONBLOCK);
+ if (fd < 0)
+ return NULL;
+
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", name);
+
+ status = ioctl(fd, TUNSETIFF, (void *)&ifr);
+ if (status < 0) {
+ close(fd);
+ return NULL;
+ }
+
+ /* Node allocation */
+ tap = calloc(1, sizeof(struct softnic_tap));
+ if (tap == NULL) {
+ close(fd);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(tap->name, name, sizeof(tap->name));
+ tap->fd = fd;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->tap_list, tap, node);
+
+ return tap;
+}
+
+#endif
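
A usage sketch; once created, the TAP is referenced by name from PORT_IN_TAP / PORT_OUT_TAP pipeline ports (Linux only):

	struct softnic_tap *tap = softnic_tap_create(p, "TAP0");
	if (tap)
		printf("tap %s ready, fd %d\n", tap->name, tap->fd);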
diff --git a/drivers/net/softnic/rte_eth_softnic_thread.c b/drivers/net/softnic/rte_eth_softnic_thread.c
new file mode 100644
index 00000000..8a150903
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_thread.c
@@ -0,0 +1,2929 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_lcore.h>
+#include <rte_ring.h>
+
+#include <rte_table_acl.h>
+#include <rte_table_array.h>
+#include <rte_table_hash.h>
+#include <rte_table_lpm.h>
+#include <rte_table_lpm_ipv6.h>
+#include "rte_eth_softnic_internals.h"
+
+/**
+ * Master thread: data plane thread init
+ */
+void
+softnic_thread_free(struct pmd_internals *softnic)
+{
+ uint32_t i;
+
+ RTE_LCORE_FOREACH_SLAVE(i) {
+ struct softnic_thread *t = &softnic->thread[i];
+
+ /* MSGQs */
+ if (t->msgq_req)
+ rte_ring_free(t->msgq_req);
+
+ if (t->msgq_rsp)
+ rte_ring_free(t->msgq_rsp);
+ }
+}
+
+int
+softnic_thread_init(struct pmd_internals *softnic)
+{
+ uint32_t i;
+
+ RTE_LCORE_FOREACH_SLAVE(i) {
+ char ring_name[NAME_MAX];
+ struct rte_ring *msgq_req, *msgq_rsp;
+ struct softnic_thread *t = &softnic->thread[i];
+ struct softnic_thread_data *t_data = &softnic->thread_data[i];
+ uint32_t cpu_id = rte_lcore_to_socket_id(i);
+
+ /* MSGQs */
+ snprintf(ring_name, sizeof(ring_name), "%s-TH%u-REQ",
+ softnic->params.name,
+ i);
+
+ msgq_req = rte_ring_create(ring_name,
+ THREAD_MSGQ_SIZE,
+ cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (msgq_req == NULL) {
+ softnic_thread_free(softnic);
+ return -1;
+ }
+
+ snprintf(ring_name, sizeof(ring_name), "%s-TH%u-RSP",
+ softnic->params.name,
+ i);
+
+ msgq_rsp = rte_ring_create(ring_name,
+ THREAD_MSGQ_SIZE,
+ cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (msgq_rsp == NULL) {
+ softnic_thread_free(softnic);
+ return -1;
+ }
+
+ /* Master thread records */
+ t->msgq_req = msgq_req;
+ t->msgq_rsp = msgq_rsp;
+ t->enabled = 1;
+
+ /* Data plane thread records */
+ t_data->n_pipelines = 0;
+ t_data->msgq_req = msgq_req;
+ t_data->msgq_rsp = msgq_rsp;
+ t_data->timer_period =
+ (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
+ t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
+ t_data->time_next_min = t_data->time_next;
+ }
+
+ return 0;
+}
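+
+/*
+ * Example (editorial): for a device named "softnic0" (hypothetical) and
+ * slave lcore 1, the rings created above are "softnic0-TH1-REQ" and
+ * "softnic0-TH1-RSP", allocated on the NUMA socket reported by
+ * rte_lcore_to_socket_id(1).
+ */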
+
+static inline int
+thread_is_running(uint32_t thread_id)
+{
+ enum rte_lcore_state_t thread_state;
+
+ thread_state = rte_eal_get_lcore_state(thread_id);
+ return (thread_state == RUNNING) ? 1 : 0;
+}
+
+/**
+ * Pipeline is running when:
+ * (A) Pipeline is mapped to a data plane thread AND
+ * (B) Its data plane thread is in RUNNING state.
+ */
+static inline int
+pipeline_is_running(struct pipeline *p)
+{
+ if (p->enabled == 0)
+ return 0;
+
+ return thread_is_running(p->thread_id);
+}
+
+/**
+ * Master thread & data plane threads: message passing
+ */
+enum thread_req_type {
+ THREAD_REQ_PIPELINE_ENABLE = 0,
+ THREAD_REQ_PIPELINE_DISABLE,
+ THREAD_REQ_MAX
+};
+
+struct thread_msg_req {
+ enum thread_req_type type;
+
+ union {
+ struct {
+ struct rte_pipeline *p;
+ struct {
+ struct rte_table_action *a;
+ } table[RTE_PIPELINE_TABLE_MAX];
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint32_t timer_period_ms;
+ uint32_t n_tables;
+ } pipeline_enable;
+
+ struct {
+ struct rte_pipeline *p;
+ } pipeline_disable;
+ };
+};
+
+struct thread_msg_rsp {
+ int status;
+};
+
+/**
+ * Master thread
+ */
+static struct thread_msg_req *
+thread_msg_alloc(void)
+{
+ size_t size = RTE_MAX(sizeof(struct thread_msg_req),
+ sizeof(struct thread_msg_rsp));
+
+ return calloc(1, size);
+}
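+
+/*
+ * Note (editorial): the buffer is sized to the larger of the request
+ * and response structs because the data plane thread writes the
+ * response in place over the request (see the casts in the
+ * thread_msg_handle_*() handlers below); a single allocation therefore
+ * travels through both rings and is freed by thread_msg_free().
+ */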
+
+static void
+thread_msg_free(struct thread_msg_rsp *rsp)
+{
+ free(rsp);
+}
+
+static struct thread_msg_rsp *
+thread_msg_send_recv(struct pmd_internals *softnic,
+ uint32_t thread_id,
+ struct thread_msg_req *req)
+{
+ struct softnic_thread *t = &softnic->thread[thread_id];
+ struct rte_ring *msgq_req = t->msgq_req;
+ struct rte_ring *msgq_rsp = t->msgq_rsp;
+ struct thread_msg_rsp *rsp;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(msgq_req, req);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ do {
+ status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
+ } while (status != 0);
+
+ return rsp;
+}
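+
+/*
+ * Note (editorial): both loops above busy-wait. This is safe only
+ * because each msgq pair is created RING_F_SP_ENQ | RING_F_SC_DEQ with
+ * exactly one producer (the master thread) on the request ring and one
+ * producer (the data plane thread) on the response ring.
+ */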
+
+int
+softnic_thread_pipeline_enable(struct pmd_internals *softnic,
+ uint32_t thread_id,
+ const char *pipeline_name)
+{
+ struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
+ struct softnic_thread *t;
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+ uint32_t i;
+ int status;
+
+ /* Check input params */
+ if ((thread_id >= RTE_MAX_LCORE) ||
+ (p == NULL) ||
+ (p->n_ports_in == 0) ||
+ (p->n_ports_out == 0) ||
+ (p->n_tables == 0))
+ return -1;
+
+ t = &softnic->thread[thread_id];
+ if ((t->enabled == 0) ||
+ p->enabled)
+ return -1;
+
+ if (!thread_is_running(thread_id)) {
+ struct softnic_thread_data *td = &softnic->thread_data[thread_id];
+ struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
+
+ if (td->n_pipelines >= THREAD_PIPELINES_MAX)
+ return -1;
+
+ /* Data plane thread */
+ td->p[td->n_pipelines] = p->p;
+
+ tdp->p = p->p;
+ for (i = 0; i < p->n_tables; i++)
+ tdp->table_data[i].a =
+ p->table[i].a;
+ tdp->n_tables = p->n_tables;
+
+ tdp->msgq_req = p->msgq_req;
+ tdp->msgq_rsp = p->msgq_rsp;
+ tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
+ tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
+
+ td->n_pipelines++;
+
+ /* Pipeline */
+ p->thread_id = thread_id;
+ p->enabled = 1;
+
+ return 0;
+ }
+
+ /* Allocate request */
+ req = thread_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = THREAD_REQ_PIPELINE_ENABLE;
+ req->pipeline_enable.p = p->p;
+ for (i = 0; i < p->n_tables; i++)
+ req->pipeline_enable.table[i].a =
+ p->table[i].a;
+ req->pipeline_enable.msgq_req = p->msgq_req;
+ req->pipeline_enable.msgq_rsp = p->msgq_rsp;
+ req->pipeline_enable.timer_period_ms = p->timer_period_ms;
+ req->pipeline_enable.n_tables = p->n_tables;
+
+ /* Send request and wait for response */
+ rsp = thread_msg_send_recv(softnic, thread_id, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ thread_msg_free(rsp);
+
+ /* Request completion */
+ if (status)
+ return status;
+
+ p->thread_id = thread_id;
+ p->enabled = 1;
+
+ return 0;
+}
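+
+/*
+ * Illustrative call (names hypothetical): map pipeline "PIPELINE0" to
+ * lcore 1. If lcore 1 is not running yet, the mapping is written
+ * directly into its thread_data; otherwise it is installed through a
+ * THREAD_REQ_PIPELINE_ENABLE message:
+ *
+ *	status = softnic_thread_pipeline_enable(softnic, 1, "PIPELINE0");
+ */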
+
+int
+softnic_thread_pipeline_disable(struct pmd_internals *softnic,
+ uint32_t thread_id,
+ const char *pipeline_name)
+{
+ struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
+ struct softnic_thread *t;
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((thread_id >= RTE_MAX_LCORE) ||
+ (p == NULL))
+ return -1;
+
+ t = &softnic->thread[thread_id];
+ if (t->enabled == 0)
+ return -1;
+
+ if (p->enabled == 0)
+ return 0;
+
+ if (p->thread_id != thread_id)
+ return -1;
+
+ if (!thread_is_running(thread_id)) {
+ struct softnic_thread_data *td = &softnic->thread_data[thread_id];
+ uint32_t i;
+
+ for (i = 0; i < td->n_pipelines; i++) {
+ struct pipeline_data *tdp = &td->pipeline_data[i];
+
+ if (tdp->p != p->p)
+ continue;
+
+ /* Data plane thread */
+ if (i < td->n_pipelines - 1) {
+ struct rte_pipeline *pipeline_last =
+ td->p[td->n_pipelines - 1];
+ struct pipeline_data *tdp_last =
+ &td->pipeline_data[td->n_pipelines - 1];
+
+ td->p[i] = pipeline_last;
+ memcpy(tdp, tdp_last, sizeof(*tdp));
+ }
+
+ td->n_pipelines--;
+
+ /* Pipeline */
+ p->enabled = 0;
+
+ break;
+ }
+
+ return 0;
+ }
+
+ /* Allocate request */
+ req = thread_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = THREAD_REQ_PIPELINE_DISABLE;
+ req->pipeline_disable.p = p->p;
+
+ /* Send request and wait for response */
+ rsp = thread_msg_send_recv(softnic, thread_id, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ thread_msg_free(rsp);
+
+ /* Request completion */
+ if (status)
+ return status;
+
+ p->enabled = 0;
+
+ return 0;
+}
+
+/**
+ * Data plane threads: message handling
+ */
+static inline struct thread_msg_req *
+thread_msg_recv(struct rte_ring *msgq_req)
+{
+ struct thread_msg_req *req;
+
+ int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
+
+ if (status != 0)
+ return NULL;
+
+ return req;
+}
+
+static inline void
+thread_msg_send(struct rte_ring *msgq_rsp,
+ struct thread_msg_rsp *rsp)
+{
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(msgq_rsp, rsp);
+ } while (status == -ENOBUFS);
+}
+
+static struct thread_msg_rsp *
+thread_msg_handle_pipeline_enable(struct softnic_thread_data *t,
+ struct thread_msg_req *req)
+{
+ struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;
+ struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
+ uint32_t i;
+
+ /* Request */
+ if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ t->p[t->n_pipelines] = req->pipeline_enable.p;
+
+ p->p = req->pipeline_enable.p;
+ for (i = 0; i < req->pipeline_enable.n_tables; i++)
+ p->table_data[i].a =
+ req->pipeline_enable.table[i].a;
+
+ p->n_tables = req->pipeline_enable.n_tables;
+
+ p->msgq_req = req->pipeline_enable.msgq_req;
+ p->msgq_rsp = req->pipeline_enable.msgq_rsp;
+ p->timer_period =
+ (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
+ p->time_next = rte_get_tsc_cycles() + p->timer_period;
+
+ t->n_pipelines++;
+
+ /* Response */
+ rsp->status = 0;
+ return rsp;
+}
+
+static struct thread_msg_rsp *
+thread_msg_handle_pipeline_disable(struct softnic_thread_data *t,
+ struct thread_msg_req *req)
+{
+ struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;
+ uint32_t n_pipelines = t->n_pipelines;
+ struct rte_pipeline *pipeline = req->pipeline_disable.p;
+ uint32_t i;
+
+ /* find pipeline */
+ for (i = 0; i < n_pipelines; i++) {
+ struct pipeline_data *p = &t->pipeline_data[i];
+
+ if (p->p != pipeline)
+ continue;
+
+ if (i < n_pipelines - 1) {
+ struct rte_pipeline *pipeline_last =
+ t->p[n_pipelines - 1];
+ struct pipeline_data *p_last =
+ &t->pipeline_data[n_pipelines - 1];
+
+ t->p[i] = pipeline_last;
+ memcpy(p, p_last, sizeof(*p));
+ }
+
+ t->n_pipelines--;
+
+ rsp->status = 0;
+ return rsp;
+ }
+
+ /* should not get here */
+ rsp->status = 0;
+ return rsp;
+}
+
+static void
+thread_msg_handle(struct softnic_thread_data *t)
+{
+ for ( ; ; ) {
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+
+ req = thread_msg_recv(t->msgq_req);
+ if (req == NULL)
+ break;
+
+ switch (req->type) {
+ case THREAD_REQ_PIPELINE_ENABLE:
+ rsp = thread_msg_handle_pipeline_enable(t, req);
+ break;
+
+ case THREAD_REQ_PIPELINE_DISABLE:
+ rsp = thread_msg_handle_pipeline_disable(t, req);
+ break;
+
+ default:
+ rsp = (struct thread_msg_rsp *)req;
+ rsp->status = -1;
+ }
+
+ thread_msg_send(t->msgq_rsp, rsp);
+ }
+}
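+
+/*
+ * Note (editorial, assumption): thread_msg_handle() drains the request
+ * ring until it is empty; it is presumably invoked periodically from
+ * the data plane run loop (see timer_period/time_next in
+ * softnic_thread_data), so enable/disable latency is bounded by
+ * THREAD_TIMER_PERIOD_MS.
+ */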
+
+/**
+ * Master thread & data plane threads: message passing
+ */
+enum pipeline_req_type {
+ /* Port IN */
+ PIPELINE_REQ_PORT_IN_STATS_READ,
+ PIPELINE_REQ_PORT_IN_ENABLE,
+ PIPELINE_REQ_PORT_IN_DISABLE,
+
+ /* Port OUT */
+ PIPELINE_REQ_PORT_OUT_STATS_READ,
+
+ /* Table */
+ PIPELINE_REQ_TABLE_STATS_READ,
+ PIPELINE_REQ_TABLE_RULE_ADD,
+ PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
+ PIPELINE_REQ_TABLE_RULE_ADD_BULK,
+ PIPELINE_REQ_TABLE_RULE_DELETE,
+ PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
+ PIPELINE_REQ_TABLE_RULE_STATS_READ,
+ PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
+ PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
+ PIPELINE_REQ_TABLE_RULE_MTR_READ,
+ PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE,
+ PIPELINE_REQ_TABLE_RULE_TTL_READ,
+ PIPELINE_REQ_MAX
+};
+
+struct pipeline_msg_req_port_in_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_port_out_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_table_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_table_rule_add {
+ struct softnic_table_rule_match match;
+ struct softnic_table_rule_action action;
+};
+
+struct pipeline_msg_req_table_rule_add_default {
+ struct softnic_table_rule_action action;
+};
+
+struct pipeline_msg_req_table_rule_add_bulk {
+ struct softnic_table_rule_match *match;
+ struct softnic_table_rule_action *action;
+ void **data;
+ uint32_t n_rules;
+ int bulk;
+};
+
+struct pipeline_msg_req_table_rule_delete {
+ struct softnic_table_rule_match match;
+};
+
+struct pipeline_msg_req_table_rule_stats_read {
+ void *data;
+ int clear;
+};
+
+struct pipeline_msg_req_table_mtr_profile_add {
+ uint32_t meter_profile_id;
+ struct rte_table_action_meter_profile profile;
+};
+
+struct pipeline_msg_req_table_mtr_profile_delete {
+ uint32_t meter_profile_id;
+};
+
+struct pipeline_msg_req_table_rule_mtr_read {
+ void *data;
+ uint32_t tc_mask;
+ int clear;
+};
+
+struct pipeline_msg_req_table_dscp_table_update {
+ uint64_t dscp_mask;
+ struct rte_table_action_dscp_table dscp_table;
+};
+
+struct pipeline_msg_req_table_rule_ttl_read {
+ void *data;
+ int clear;
+};
+
+struct pipeline_msg_req {
+ enum pipeline_req_type type;
+ uint32_t id; /* Port IN, port OUT or table ID */
+
+ RTE_STD_C11
+ union {
+ struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
+ struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
+ struct pipeline_msg_req_table_stats_read table_stats_read;
+ struct pipeline_msg_req_table_rule_add table_rule_add;
+ struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
+ struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
+ struct pipeline_msg_req_table_rule_delete table_rule_delete;
+ struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
+ struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
+ struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
+ struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
+ struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update;
+ struct pipeline_msg_req_table_rule_ttl_read table_rule_ttl_read;
+ };
+};
+
+struct pipeline_msg_rsp_port_in_stats_read {
+ struct rte_pipeline_port_in_stats stats;
+};
+
+struct pipeline_msg_rsp_port_out_stats_read {
+ struct rte_pipeline_port_out_stats stats;
+};
+
+struct pipeline_msg_rsp_table_stats_read {
+ struct rte_pipeline_table_stats stats;
+};
+
+struct pipeline_msg_rsp_table_rule_add {
+ void *data;
+};
+
+struct pipeline_msg_rsp_table_rule_add_default {
+ void *data;
+};
+
+struct pipeline_msg_rsp_table_rule_add_bulk {
+ uint32_t n_rules;
+};
+
+struct pipeline_msg_rsp_table_rule_stats_read {
+ struct rte_table_action_stats_counters stats;
+};
+
+struct pipeline_msg_rsp_table_rule_mtr_read {
+ struct rte_table_action_mtr_counters stats;
+};
+
+struct pipeline_msg_rsp_table_rule_ttl_read {
+ struct rte_table_action_ttl_counters stats;
+};
+
+struct pipeline_msg_rsp {
+ int status;
+
+ RTE_STD_C11
+ union {
+ struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
+ struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
+ struct pipeline_msg_rsp_table_stats_read table_stats_read;
+ struct pipeline_msg_rsp_table_rule_add table_rule_add;
+ struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
+ struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
+ struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
+ struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
+ struct pipeline_msg_rsp_table_rule_ttl_read table_rule_ttl_read;
+ };
+};
+
+/**
+ * Master thread
+ */
+static struct pipeline_msg_req *
+pipeline_msg_alloc(void)
+{
+ size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
+ sizeof(struct pipeline_msg_rsp));
+
+ return calloc(1, size);
+}
+
+static void
+pipeline_msg_free(struct pipeline_msg_rsp *rsp)
+{
+ free(rsp);
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_send_recv(struct pipeline *p,
+ struct pipeline_msg_req *req)
+{
+ struct rte_ring *msgq_req = p->msgq_req;
+ struct rte_ring *msgq_rsp = p->msgq_rsp;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(msgq_req, req);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ do {
+ status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
+ } while (status != 0);
+
+ return rsp;
+}
+
+int
+softnic_pipeline_port_in_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_in)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_stats_read(p->p,
+ port_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
+ req->id = port_id;
+ req->port_in_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
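+
+/*
+ * Illustrative usage (names hypothetical): read and clear the counters
+ * of input port 0 of pipeline "PIPELINE0":
+ *
+ *	struct rte_pipeline_port_in_stats stats;
+ *	status = softnic_pipeline_port_in_stats_read(softnic,
+ *		"PIPELINE0", 0, &stats, 1);
+ */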
+
+int
+softnic_pipeline_port_in_enable(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_in)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_enable(p->p, port_id);
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_ENABLE;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_port_in_disable(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_in)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_disable(p->p, port_id);
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_DISABLE;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_port_out_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_out)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_out_stats_read(p->p,
+ port_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
+ req->id = port_id;
+ req->port_out_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_table_stats_read(p->p,
+ table_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_STATS_READ;
+ req->id = table_id;
+ req->table_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+static int
+match_check(struct softnic_table_rule_match *match,
+ struct pipeline *p,
+ uint32_t table_id)
+{
+ struct softnic_table *table;
+
+ if (match == NULL ||
+ p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ table = &p->table[table_id];
+ if (match->match_type != table->params.match_type)
+ return -1;
+
+ switch (match->match_type) {
+ case TABLE_ACL:
+ {
+ struct softnic_table_acl_params *t = &table->params.match.acl;
+ struct softnic_table_rule_match_acl *r = &match->match.acl;
+
+ if ((r->ip_version && (t->ip_version == 0)) ||
+ ((r->ip_version == 0) && t->ip_version))
+ return -1;
+
+ if (r->ip_version) {
+ if (r->sa_depth > 32 ||
+ r->da_depth > 32)
+ return -1;
+ } else {
+ if (r->sa_depth > 128 ||
+ r->da_depth > 128)
+ return -1;
+ }
+ return 0;
+ }
+
+ case TABLE_ARRAY:
+ return 0;
+
+ case TABLE_HASH:
+ return 0;
+
+ case TABLE_LPM:
+ {
+ struct softnic_table_lpm_params *t = &table->params.match.lpm;
+ struct softnic_table_rule_match_lpm *r = &match->match.lpm;
+
+ if ((r->ip_version && (t->key_size != 4)) ||
+ ((r->ip_version == 0) && (t->key_size != 16)))
+ return -1;
+
+ if (r->ip_version) {
+ if (r->depth > 32)
+ return -1;
+ } else {
+ if (r->depth > 128)
+ return -1;
+ }
+ return 0;
+ }
+
+ case TABLE_STUB:
+ return -1;
+
+ default:
+ return -1;
+ }
+}
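+
+/*
+ * Example (editorial): for an IPv4 LPM table (key_size == 4) a valid
+ * match sets ip_version != 0 and depth <= 32; an IPv6 LPM table
+ * (key_size == 16) requires ip_version == 0 and depth <= 128. Any
+ * other combination is rejected above, as are all rules against STUB
+ * tables.
+ */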
+
+static int
+action_check(struct softnic_table_rule_action *action,
+ struct pipeline *p,
+ uint32_t table_id)
+{
+ struct softnic_table_action_profile *ap;
+
+ if (action == NULL ||
+ p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ ap = p->table[table_id].ap;
+ if (action->action_mask != ap->params.action_mask)
+ return -1;
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT &&
+ action->fwd.id >= p->n_ports_out)
+ return -1;
+
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE &&
+ action->fwd.id >= p->n_tables)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
+ uint32_t tc_mask1 = action->mtr.tc_mask;
+
+ if (tc_mask1 != tc_mask0)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ uint32_t n_subports_per_port =
+ ap->params.tm.n_subports_per_port;
+ uint32_t n_pipes_per_subport =
+ ap->params.tm.n_pipes_per_subport;
+ uint32_t subport_id = action->tm.subport_id;
+ uint32_t pipe_id = action->tm.pipe_id;
+
+ if (subport_id >= n_subports_per_port ||
+ pipe_id >= n_pipes_per_subport)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ uint64_t encap_mask = ap->params.encap.encap_mask;
+ enum rte_table_action_encap_type type = action->encap.type;
+
+ if ((encap_mask & (1LLU << type)) == 0)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ int ip_version0 = ap->params.common.ip_version;
+ int ip_version1 = action->nat.ip_version;
+
+ if ((ip_version1 && (ip_version0 == 0)) ||
+ ((ip_version1 == 0) && ip_version0))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+action_default_check(struct softnic_table_rule_action *action,
+ struct pipeline *p,
+ uint32_t table_id)
+{
+ if (action == NULL ||
+ action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD) ||
+ p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT &&
+ action->fwd.id >= p->n_ports_out)
+ return -1;
+
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE &&
+ action->fwd.id >= p->n_tables)
+ return -1;
+ }
+
+ return 0;
+}
+
+union table_rule_match_low_level {
+ struct rte_table_acl_rule_add_params acl_add;
+ struct rte_table_acl_rule_delete_params acl_delete;
+ struct rte_table_array_key array;
+ uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
+ struct rte_table_lpm_key lpm_ipv4;
+ struct rte_table_lpm_ipv6_key lpm_ipv6;
+};
+
+static int
+match_convert(struct softnic_table_rule_match *mh,
+ union table_rule_match_low_level *ml,
+ int add);
+
+static int
+action_convert(struct rte_table_action *a,
+ struct softnic_table_rule_action *action,
+ struct rte_pipeline_table_entry *data);
+
+int
+softnic_pipeline_table_rule_add(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ match == NULL ||
+ action == NULL ||
+ data == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables ||
+ match_check(match, p, table_id) ||
+ action_check(action, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+ union table_rule_match_low_level match_ll;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ int key_found;
+ uint8_t *buffer;
+
+ buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
+ if (buffer == NULL)
+ return -1;
+
+ /* Table match-action rule conversion */
+ data_in = (struct rte_pipeline_table_entry *)buffer;
+
+ status = match_convert(match, &match_ll, 1);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ status = action_convert(a, action, data_in);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ /* Add rule (match, action) to table */
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ &match_ll,
+ data_in,
+ &key_found,
+ &data_out);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ /* Write response */
+ *data = data_out;
+
+ free(buffer);
+ return 0;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD;
+ req->id = table_id;
+ memcpy(&req->table_rule_add.match, match, sizeof(*match));
+ memcpy(&req->table_rule_add.action, action, sizeof(*action));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *data = rsp->table_rule_add.data;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
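+
+/*
+ * Illustrative sketch (editorial; "PIPELINE0" and ip are hypothetical):
+ * add an IPv4 LPM rule that forwards /24 matches to output port 0,
+ * assuming the table's action profile enables only the FWD action:
+ *
+ *	struct softnic_table_rule_match m = {
+ *		.match_type = TABLE_LPM,
+ *		.match.lpm = { .ip_version = 1, .ipv4 = ip, .depth = 24 },
+ *	};
+ *	struct softnic_table_rule_action a = {
+ *		.action_mask = 1LLU << RTE_TABLE_ACTION_FWD,
+ *		.fwd = { .action = RTE_PIPELINE_ACTION_PORT, .id = 0 },
+ *	};
+ *	void *data;
+ *	status = softnic_pipeline_table_rule_add(softnic, "PIPELINE0",
+ *		0, &m, &a, &data);
+ */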
+
+int
+softnic_pipeline_table_rule_add_default(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_action *action,
+ void **data)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ action == NULL ||
+ data == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables ||
+ action_default_check(action, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint8_t *buffer;
+
+ buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
+ if (buffer == NULL)
+ return -1;
+
+ /* Apply actions */
+ data_in = (struct rte_pipeline_table_entry *)buffer;
+
+ data_in->action = action->fwd.action;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
+ data_in->port_id = action->fwd.id;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
+ data_in->table_id = action->fwd.id;
+
+ /* Add default rule to table */
+ status = rte_pipeline_table_default_entry_add(p->p,
+ table_id,
+ data_in,
+ &data_out);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ /* Write response */
+ *data = data_out;
+
+ free(buffer);
+ return 0;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
+ req->id = table_id;
+ memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *data = rsp->table_rule_add_default.data;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_add_bulk(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data,
+ uint32_t *n_rules)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ uint32_t i;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ match == NULL ||
+ action == NULL ||
+ data == NULL ||
+ n_rules == NULL ||
+ (*n_rules == 0))
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ for (i = 0; i < *n_rules; i++)
+ if (match_check(&match[i], p, table_id) ||
+ action_check(&action[i], p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+ union table_rule_match_low_level *match_ll;
+ uint8_t *action_ll;
+ void **match_ll_ptr;
+ struct rte_pipeline_table_entry **action_ll_ptr;
+ struct rte_pipeline_table_entry **entries_ptr =
+ (struct rte_pipeline_table_entry **)data;
+ uint32_t bulk =
+ (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
+ int *found;
+
+ /* Memory allocation */
+ match_ll = calloc(*n_rules, sizeof(union table_rule_match_low_level));
+ action_ll = calloc(*n_rules, TABLE_RULE_ACTION_SIZE_MAX);
+ match_ll_ptr = calloc(*n_rules, sizeof(void *));
+ action_ll_ptr =
+ calloc(*n_rules, sizeof(struct rte_pipeline_table_entry *));
+ found = calloc(*n_rules, sizeof(int));
+
+ if (match_ll == NULL ||
+ action_ll == NULL ||
+ match_ll_ptr == NULL ||
+ action_ll_ptr == NULL ||
+ found == NULL)
+ goto fail;
+
+ for (i = 0; i < *n_rules; i++) {
+ match_ll_ptr[i] = (void *)&match_ll[i];
+ action_ll_ptr[i] =
+ (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
+ }
+
+ /* Rule match conversion */
+ for (i = 0; i < *n_rules; i++) {
+ status = match_convert(&match[i], match_ll_ptr[i], 1);
+ if (status)
+ goto fail;
+ }
+
+ /* Rule action conversion */
+ for (i = 0; i < *n_rules; i++) {
+ status = action_convert(a, &action[i], action_ll_ptr[i]);
+ if (status)
+ goto fail;
+ }
+
+ /* Add rule (match, action) to table */
+ if (bulk) {
+ status = rte_pipeline_table_entry_add_bulk(p->p,
+ table_id,
+ match_ll_ptr,
+ action_ll_ptr,
+ *n_rules,
+ found,
+ entries_ptr);
+ if (status)
+ *n_rules = 0;
+ } else {
+ for (i = 0; i < *n_rules; i++) {
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ match_ll_ptr[i],
+ action_ll_ptr[i],
+ &found[i],
+ &entries_ptr[i]);
+ if (status) {
+ *n_rules = i;
+ break;
+ }
+ }
+ }
+
+ /* Free */
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ return status;
+
+fail:
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ *n_rules = 0;
+ return -1;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
+ req->id = table_id;
+ req->table_rule_add_bulk.match = match;
+ req->table_rule_add_bulk.action = action;
+ req->table_rule_add_bulk.data = data;
+ req->table_rule_add_bulk.n_rules = *n_rules;
+ req->table_rule_add_bulk.bulk =
+ (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *n_rules = rsp->table_rule_add_bulk.n_rules;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_delete(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ match == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables ||
+ match_check(match, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ union table_rule_match_low_level match_ll;
+ int key_found;
+
+ status = match_convert(match, &match_ll, 0);
+ if (status)
+ return -1;
+
+ status = rte_pipeline_table_entry_delete(p->p,
+ table_id,
+ &match_ll,
+ &key_found,
+ NULL);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
+ req->id = table_id;
+ memcpy(&req->table_rule_delete.match, match, sizeof(*match));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_delete_default(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_table_default_entry_delete(p->p,
+ table_id,
+ NULL);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
+ req->id = table_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ data == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_stats_read(a,
+ data,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
+ req->id = table_id;
+ req->table_rule_stats_read.data = data;
+ req->table_rule_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_mtr_profile_add(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ profile == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_profile_add(a,
+ meter_profile_id,
+ profile);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
+ req->id = table_id;
+ req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
+ memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_mtr_profile_delete(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_profile_delete(a,
+ meter_profile_id);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
+ req->id = table_id;
+ req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_mtr_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ data == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_read(a,
+ data,
+ tc_mask,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
+ req->id = table_id;
+ req->table_rule_mtr_read.data = data;
+ req->table_rule_mtr_read.tc_mask = tc_mask;
+ req->table_rule_mtr_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_dscp_table_update(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *dscp_table)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ dscp_table == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_dscp_table_update(a,
+ dscp_mask,
+ dscp_table);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE;
+ req->id = table_id;
+ req->table_dscp_table_update.dscp_mask = dscp_mask;
+ memcpy(&req->table_dscp_table_update.dscp_table,
+ dscp_table, sizeof(*dscp_table));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_ttl_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ data == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_ttl_read(a,
+ data,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_TTL_READ;
+ req->id = table_id;
+ req->table_rule_ttl_read.data = data;
+ req->table_rule_ttl_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->table_rule_ttl_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+/**
+ * Data plane threads: message handling
+ */
+static inline struct pipeline_msg_req *
+pipeline_msg_recv(struct rte_ring *msgq_req)
+{
+ struct pipeline_msg_req *req;
+
+ int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
+
+ if (status != 0)
+ return NULL;
+
+ return req;
+}
+
+static inline void
+pipeline_msg_send(struct rte_ring *msgq_rsp,
+ struct pipeline_msg_rsp *rsp)
+{
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(msgq_rsp, rsp);
+ } while (status == -ENOBUFS);
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+ int clear = req->port_in_stats_read.clear;
+
+ rsp->status = rte_pipeline_port_in_stats_read(p->p,
+ port_id,
+ &rsp->port_in_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+
+ rsp->status = rte_pipeline_port_in_enable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+
+ rsp->status = rte_pipeline_port_in_disable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+ int clear = req->port_out_stats_read.clear;
+
+ rsp->status = rte_pipeline_port_out_stats_read(p->p,
+ port_id,
+ &rsp->port_out_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+ int clear = req->table_stats_read.clear;
+
+ rsp->status = rte_pipeline_table_stats_read(p->p,
+ port_id,
+ &rsp->table_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static int
+match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
+{
+ if (depth > 128)
+ return -1;
+
+ switch (depth / 32) {
+ case 0:
+ depth32[0] = depth;
+ depth32[1] = 0;
+ depth32[2] = 0;
+ depth32[3] = 0;
+ return 0;
+
+ case 1:
+ depth32[0] = 32;
+ depth32[1] = depth - 32;
+ depth32[2] = 0;
+ depth32[3] = 0;
+ return 0;
+
+ case 2:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = depth - 64;
+ depth32[3] = 0;
+ return 0;
+
+ case 3:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = 32;
+ depth32[3] = depth - 96;
+ return 0;
+
+ case 4:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = 32;
+ depth32[3] = 32;
+ return 0;
+
+ default:
+ return -1;
+ }
+}
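+
+/*
+ * Worked example (editorial): an IPv6 prefix length of 50 splits into
+ * per-u32 depths {32, 18, 0, 0} (case 1 above: 50 / 32 == 1, so
+ * depth32[0] = 32 and depth32[1] = 50 - 32 = 18), matching the ACL
+ * table's view of the address as four 32-bit fields.
+ */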
+
+static int
+match_convert(struct softnic_table_rule_match *mh,
+ union table_rule_match_low_level *ml,
+ int add)
+{
+ memset(ml, 0, sizeof(*ml));
+
+ switch (mh->match_type) {
+ case TABLE_ACL:
+ if (mh->match.acl.ip_version)
+ if (add) {
+ ml->acl_add.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_add.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_add.field_value[1].value.u32 =
+ mh->match.acl.ipv4.sa;
+ ml->acl_add.field_value[1].mask_range.u32 =
+ mh->match.acl.sa_depth;
+
+ ml->acl_add.field_value[2].value.u32 =
+ mh->match.acl.ipv4.da;
+ ml->acl_add.field_value[2].mask_range.u32 =
+ mh->match.acl.da_depth;
+
+ ml->acl_add.field_value[3].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_add.field_value[3].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_add.field_value[4].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_add.field_value[4].mask_range.u16 =
+ mh->match.acl.dp1;
+
+ ml->acl_add.priority =
+ (int32_t)mh->match.acl.priority;
+ } else {
+ ml->acl_delete.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_delete.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_delete.field_value[1].value.u32 =
+ mh->match.acl.ipv4.sa;
+ ml->acl_delete.field_value[1].mask_range.u32 =
+ mh->match.acl.sa_depth;
+
+ ml->acl_delete.field_value[2].value.u32 =
+ mh->match.acl.ipv4.da;
+ ml->acl_delete.field_value[2].mask_range.u32 =
+ mh->match.acl.da_depth;
+
+ ml->acl_delete.field_value[3].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_delete.field_value[3].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_delete.field_value[4].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_delete.field_value[4].mask_range.u16 =
+ mh->match.acl.dp1;
+ }
+ else
+ if (add) {
+ uint32_t *sa32 =
+ (uint32_t *)mh->match.acl.ipv6.sa;
+ uint32_t *da32 =
+ (uint32_t *)mh->match.acl.ipv6.da;
+ uint32_t sa32_depth[4], da32_depth[4];
+ int status;
+
+ status = match_convert_ipv6_depth(mh->match.acl.sa_depth,
+ sa32_depth);
+ if (status)
+ return status;
+
+ status = match_convert_ipv6_depth(
+ mh->match.acl.da_depth,
+ da32_depth);
+ if (status)
+ return status;
+
+ ml->acl_add.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_add.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_add.field_value[1].value.u32 = sa32[0];
+ ml->acl_add.field_value[1].mask_range.u32 =
+ sa32_depth[0];
+ ml->acl_add.field_value[2].value.u32 = sa32[1];
+ ml->acl_add.field_value[2].mask_range.u32 =
+ sa32_depth[1];
+ ml->acl_add.field_value[3].value.u32 = sa32[2];
+ ml->acl_add.field_value[3].mask_range.u32 =
+ sa32_depth[2];
+ ml->acl_add.field_value[4].value.u32 = sa32[3];
+ ml->acl_add.field_value[4].mask_range.u32 =
+ sa32_depth[3];
+
+ ml->acl_add.field_value[5].value.u32 = da32[0];
+ ml->acl_add.field_value[5].mask_range.u32 =
+ da32_depth[0];
+ ml->acl_add.field_value[6].value.u32 = da32[1];
+ ml->acl_add.field_value[6].mask_range.u32 =
+ da32_depth[1];
+ ml->acl_add.field_value[7].value.u32 = da32[2];
+ ml->acl_add.field_value[7].mask_range.u32 =
+ da32_depth[2];
+ ml->acl_add.field_value[8].value.u32 = da32[3];
+ ml->acl_add.field_value[8].mask_range.u32 =
+ da32_depth[3];
+
+ ml->acl_add.field_value[9].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_add.field_value[9].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_add.field_value[10].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_add.field_value[10].mask_range.u16 =
+ mh->match.acl.dp1;
+
+ ml->acl_add.priority =
+ (int32_t)mh->match.acl.priority;
+ } else {
+ uint32_t *sa32 =
+ (uint32_t *)mh->match.acl.ipv6.sa;
+ uint32_t *da32 =
+ (uint32_t *)mh->match.acl.ipv6.da;
+ uint32_t sa32_depth[4], da32_depth[4];
+ int status;
+
+ status = match_convert_ipv6_depth(mh->match.acl.sa_depth,
+ sa32_depth);
+ if (status)
+ return status;
+
+ status = match_convert_ipv6_depth(mh->match.acl.da_depth,
+ da32_depth);
+ if (status)
+ return status;
+
+ ml->acl_delete.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_delete.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_delete.field_value[1].value.u32 =
+ sa32[0];
+ ml->acl_delete.field_value[1].mask_range.u32 =
+ sa32_depth[0];
+ ml->acl_delete.field_value[2].value.u32 =
+ sa32[1];
+ ml->acl_delete.field_value[2].mask_range.u32 =
+ sa32_depth[1];
+ ml->acl_delete.field_value[3].value.u32 =
+ sa32[2];
+ ml->acl_delete.field_value[3].mask_range.u32 =
+ sa32_depth[2];
+ ml->acl_delete.field_value[4].value.u32 =
+ sa32[3];
+ ml->acl_delete.field_value[4].mask_range.u32 =
+ sa32_depth[3];
+
+ ml->acl_delete.field_value[5].value.u32 =
+ da32[0];
+ ml->acl_delete.field_value[5].mask_range.u32 =
+ da32_depth[0];
+ ml->acl_delete.field_value[6].value.u32 =
+ da32[1];
+ ml->acl_delete.field_value[6].mask_range.u32 =
+ da32_depth[1];
+ ml->acl_delete.field_value[7].value.u32 =
+ da32[2];
+ ml->acl_delete.field_value[7].mask_range.u32 =
+ da32_depth[2];
+ ml->acl_delete.field_value[8].value.u32 =
+ da32[3];
+ ml->acl_delete.field_value[8].mask_range.u32 =
+ da32_depth[3];
+
+ ml->acl_delete.field_value[9].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_delete.field_value[9].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_delete.field_value[10].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_delete.field_value[10].mask_range.u16 =
+ mh->match.acl.dp1;
+ }
+ return 0;
+
+ case TABLE_ARRAY:
+ ml->array.pos = mh->match.array.pos;
+ return 0;
+
+ case TABLE_HASH:
+ memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
+ return 0;
+
+ case TABLE_LPM:
+ if (mh->match.lpm.ip_version) {
+ ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
+ ml->lpm_ipv4.depth = mh->match.lpm.depth;
+ } else {
+ memcpy(ml->lpm_ipv6.ip,
+ mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
+ ml->lpm_ipv6.depth = mh->match.lpm.depth;
+ }
+
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+
+static int
+action_convert(struct rte_table_action *a,
+ struct softnic_table_rule_action *action,
+ struct rte_pipeline_table_entry *data)
+{
+ int status;
+
+ /* Apply actions */
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_FWD,
+ &action->fwd);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_LB,
+ &action->lb);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_MTR,
+ &action->mtr);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TM,
+ &action->tm);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_ENCAP,
+ &action->encap);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_NAT,
+ &action->nat);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TTL,
+ &action->ttl);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_STATS,
+ &action->stats);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TIME,
+ &action->time);
+
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ union table_rule_match_low_level match_ll;
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ struct softnic_table_rule_match *match = &req->table_rule_add.match;
+ struct softnic_table_rule_action *action = &req->table_rule_add.action;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint32_t table_id = req->id;
+ int key_found, status;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ /* Apply actions */
+ memset(p->buffer, 0, sizeof(p->buffer));
+ data_in = (struct rte_pipeline_table_entry *)p->buffer;
+
+ status = match_convert(match, &match_ll, 1);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ status = action_convert(a, action, data_in);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ &match_ll,
+ data_in,
+ &key_found,
+ &data_out);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ /* Write response */
+ rsp->status = 0;
+ rsp->table_rule_add.data = data_out;
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ struct softnic_table_rule_action *action = &req->table_rule_add_default.action;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint32_t table_id = req->id;
+ int status;
+
+ /* Apply actions */
+ memset(p->buffer, 0, sizeof(p->buffer));
+ data_in = (struct rte_pipeline_table_entry *)p->buffer;
+
+ data_in->action = action->fwd.action;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
+ data_in->port_id = action->fwd.id;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
+ data_in->table_id = action->fwd.id;
+
+ /* Add default rule to table */
+ status = rte_pipeline_table_default_entry_add(p->p,
+ table_id,
+ data_in,
+ &data_out);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ /* Write response */
+ rsp->status = 0;
+ rsp->table_rule_add_default.data = data_out;
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+
+ uint32_t table_id = req->id;
+ struct softnic_table_rule_match *match = req->table_rule_add_bulk.match;
+ struct softnic_table_rule_action *action = req->table_rule_add_bulk.action;
+ struct rte_pipeline_table_entry **data =
+ (struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data;
+ uint32_t n_rules = req->table_rule_add_bulk.n_rules;
+ uint32_t bulk = req->table_rule_add_bulk.bulk;
+
+ struct rte_table_action *a = p->table_data[table_id].a;
+ union table_rule_match_low_level *match_ll;
+ uint8_t *action_ll;
+ void **match_ll_ptr;
+ struct rte_pipeline_table_entry **action_ll_ptr;
+ int *found, status;
+ uint32_t i;
+
+ /* Memory allocation */
+ match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level));
+ action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX);
+ match_ll_ptr = calloc(n_rules, sizeof(void *));
+ action_ll_ptr =
+ calloc(n_rules, sizeof(struct rte_pipeline_table_entry *));
+ found = calloc(n_rules, sizeof(int));
+
+ if (match_ll == NULL ||
+ action_ll == NULL ||
+ match_ll_ptr == NULL ||
+ action_ll_ptr == NULL ||
+ found == NULL)
+ goto fail;
+
+ for (i = 0; i < n_rules; i++) {
+ match_ll_ptr[i] = (void *)&match_ll[i];
+ action_ll_ptr[i] =
+ (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
+ }
+
+ /* Rule match conversion */
+ for (i = 0; i < n_rules; i++) {
+ status = match_convert(&match[i], match_ll_ptr[i], 1);
+ if (status)
+ goto fail;
+ }
+
+ /* Rule action conversion */
+ for (i = 0; i < n_rules; i++) {
+ status = action_convert(a, &action[i], action_ll_ptr[i]);
+ if (status)
+ goto fail;
+ }
+
+ /* Add rule (match, action) to table */
+ if (bulk) {
+ status = rte_pipeline_table_entry_add_bulk(p->p,
+ table_id,
+ match_ll_ptr,
+ action_ll_ptr,
+ n_rules,
+ found,
+ data);
+ if (status)
+ n_rules = 0;
+ } else {
+ for (i = 0; i < n_rules; i++) {
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ match_ll_ptr[i],
+ action_ll_ptr[i],
+ &found[i],
+ &data[i]);
+ if (status) {
+ n_rules = i;
+ break;
+ }
+ }
+ }
+
+ /* Write response */
+ rsp->status = 0;
+ rsp->table_rule_add_bulk.n_rules = n_rules;
+
+ /* Free */
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ return rsp;
+
+fail:
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ rsp->status = -1;
+ rsp->table_rule_add_bulk.n_rules = 0;
+ return rsp;
+}
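
The bulk handler above allocates five parallel arrays, converts every rule up front, and funnels all error paths through one fail label. A minimal editorial sketch of that shape (hypothetical types, not part of the patch); the single cleanup path works because free(NULL) is defined as a no-op:

#include <stdlib.h>

struct ll_match { int m; };      /* hypothetical low-level match */
struct ll_action { int a; };     /* hypothetical low-level action */

static int
bulk_add_sketch(unsigned int n_rules)
{
        struct ll_match *match = calloc(n_rules, sizeof(*match));
        struct ll_action *action = calloc(n_rules, sizeof(*action));

        if (match == NULL || action == NULL)
                goto fail;

        /* ... convert all rules, then issue one bulk add call ... */

        free(action);
        free(match);
        return 0;

fail:
        /* free(NULL) is a no-op, so one cleanup path suffices */
        free(action);
        free(match);
        return -1;
}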
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ union table_rule_match_low_level match_ll;
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ struct softnic_table_rule_match *match = &req->table_rule_delete.match;
+ uint32_t table_id = req->id;
+ int key_found, status;
+
+ status = match_convert(match, &match_ll, 0);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ table_id,
+ &match_ll,
+ &key_found,
+ NULL);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ table_id,
+ NULL);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_stats_read.data;
+ int clear = req->table_rule_stats_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_stats_read(a,
+ data,
+ &rsp->table_rule_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
+ struct rte_table_action_meter_profile *profile =
+ &req->table_mtr_profile_add.profile;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_profile_add(a,
+ meter_profile_id,
+ profile);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ uint32_t meter_profile_id =
+ req->table_mtr_profile_delete.meter_profile_id;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_profile_delete(a,
+ meter_profile_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_mtr_read.data;
+ uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
+ int clear = req->table_rule_mtr_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_read(a,
+ data,
+ tc_mask,
+ &rsp->table_rule_mtr_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask;
+ struct rte_table_action_dscp_table *dscp_table =
+ &req->table_dscp_table_update.dscp_table;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_dscp_table_update(a,
+ dscp_mask,
+ dscp_table);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_ttl_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_ttl_read.data;
+ int clear = req->table_rule_ttl_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_ttl_read(a,
+ data,
+ &rsp->table_rule_ttl_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static void
+pipeline_msg_handle(struct pipeline_data *p)
+{
+ for ( ; ; ) {
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+
+ req = pipeline_msg_recv(p->msgq_req);
+ if (req == NULL)
+ break;
+
+ switch (req->type) {
+ case PIPELINE_REQ_PORT_IN_STATS_READ:
+ rsp = pipeline_msg_handle_port_in_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_IN_ENABLE:
+ rsp = pipeline_msg_handle_port_in_enable(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_IN_DISABLE:
+ rsp = pipeline_msg_handle_port_in_disable(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_OUT_STATS_READ:
+ rsp = pipeline_msg_handle_port_out_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_STATS_READ:
+ rsp = pipeline_msg_handle_table_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD:
+ rsp = pipeline_msg_handle_table_rule_add(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
+ rsp = pipeline_msg_handle_table_rule_add_default(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
+ rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_DELETE:
+ rsp = pipeline_msg_handle_table_rule_delete(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
+ rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_STATS_READ:
+ rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
+ rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
+ rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_MTR_READ:
+ rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE:
+ rsp = pipeline_msg_handle_table_dscp_table_update(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_TTL_READ:
+ rsp = pipeline_msg_handle_table_rule_ttl_read(p, req);
+ break;
+
+ default:
+ rsp = (struct pipeline_msg_rsp *)req;
+ rsp->status = -1;
+ }
+
+ pipeline_msg_send(p->msgq_rsp, rsp);
+ }
+}
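
A detail worth noting in every handler above: the response is written in place over the request (rsp = (struct pipeline_msg_rsp *)req), so the same buffer travels down the request queue and back up the response queue with no extra allocation. A stripped-down sketch of that overlay, with hypothetical struct layouts (not part of the patch):

struct msg_req { int type; int payload; };
struct msg_rsp { int status; int result; };

static struct msg_rsp *
handle_sketch(struct msg_req *req)
{
        /* Overlay the response on the request buffer; safe as long
         * as every request field is read before it is overwritten.
         */
        struct msg_rsp *rsp = (struct msg_rsp *)req;
        int payload = req->payload;     /* read before overwriting */

        rsp->status = 0;
        rsp->result = payload;
        return rsp;
}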
+
+/**
+ * Data plane threads: main
+ */
+int
+rte_pmd_softnic_run(uint16_t port_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct pmd_internals *softnic;
+ struct softnic_thread_data *t;
+ uint32_t thread_id, j;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+#endif
+
+ softnic = dev->data->dev_private;
+ thread_id = rte_lcore_id();
+ t = &softnic->thread_data[thread_id];
+ t->iter++;
+
+ /* Data Plane */
+ for (j = 0; j < t->n_pipelines; j++)
+ rte_pipeline_run(t->p[j]);
+
+ /* Control Plane */
+ if ((t->iter & 0xFLLU) == 0) {
+ uint64_t time = rte_get_tsc_cycles();
+ uint64_t time_next_min = UINT64_MAX;
+
+ if (time < t->time_next_min)
+ return 0;
+
+ /* Pipeline message queues */
+ for (j = 0; j < t->n_pipelines; j++) {
+ struct pipeline_data *p =
+ &t->pipeline_data[j];
+ uint64_t time_next = p->time_next;
+
+ if (time_next <= time) {
+ pipeline_msg_handle(p);
+ rte_pipeline_flush(p->p);
+ time_next = time + p->timer_period;
+ p->time_next = time_next;
+ }
+
+ if (time_next < time_next_min)
+ time_next_min = time_next;
+ }
+
+ /* Thread message queues */
+ {
+ uint64_t time_next = t->time_next;
+
+ if (time_next <= time) {
+ thread_msg_handle(t);
+ time_next = time + t->timer_period;
+ t->time_next = time_next;
+ }
+
+ if (time_next < time_next_min)
+ time_next_min = time_next;
+ }
+
+ t->time_next_min = time_next_min;
+ }
+
+ return 0;
+}
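
For context, rte_pmd_softnic_run() is meant to be called continuously from a data-plane lcore: the (t->iter & 0xFLLU) == 0 gate above runs the control-plane work only every 16th call, and the time_next_min cache skips even that when no timer is due. A hedged usage sketch (the port id constant is an assumption):

#include <rte_common.h>
#include <rte_eth_softnic.h>

#define SOFTNIC_PORT_ID 0       /* assumed application-defined */

static int
softnic_lcore_main(__rte_unused void *arg)
{
        /* Run the pipelines plus the low-rate message handling
         * forever; launched e.g. via rte_eal_remote_launch().
         */
        for ( ; ; )
                rte_pmd_softnic_run(SOFTNIC_PORT_ID);

        return 0;
}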
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 11d638a9..baaafbe2 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -7,62 +7,148 @@
#include <string.h>
#include <rte_malloc.h>
+#include <rte_string_fns.h>
#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"
-#define BYTES_IN_MBPS (1000 * 1000 / 8)
#define SUBPORT_TC_PERIOD 10
#define PIPE_TC_PERIOD 40
int
-tm_params_check(struct pmd_params *params, uint32_t hard_rate)
+softnic_tmgr_init(struct pmd_internals *p)
{
- uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
- uint32_t i;
+ TAILQ_INIT(&p->tmgr_port_list);
- /* rate */
- if (params->soft.tm.rate) {
- if (params->soft.tm.rate > hard_rate_bytes_per_sec)
- return -EINVAL;
- } else {
- params->soft.tm.rate =
- (hard_rate_bytes_per_sec > UINT32_MAX) ?
- UINT32_MAX : hard_rate_bytes_per_sec;
+ return 0;
+}
+
+void
+softnic_tmgr_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
+ if (tmgr_port == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
+ rte_sched_port_free(tmgr_port->s);
+ free(tmgr_port);
}
+}
+
+struct softnic_tmgr_port *
+softnic_tmgr_port_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tmgr_port *tmgr_port;
- /* nb_queues */
- if (params->soft.tm.nb_queues == 0)
- return -EINVAL;
+ if (name == NULL)
+ return NULL;
- if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
- params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
+ TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
+ if (strcmp(tmgr_port->name, name) == 0)
+ return tmgr_port;
- params->soft.tm.nb_queues =
- rte_align32pow2(params->soft.tm.nb_queues);
+ return NULL;
+}
- /* qsize */
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- if (params->soft.tm.qsize[i] == 0)
- return -EINVAL;
+struct softnic_tmgr_port *
+softnic_tmgr_port_create(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tmgr_port *tmgr_port;
+ struct tm_params *t = &p->soft.tm.params;
+ struct rte_sched_port *sched;
+ uint32_t n_subports, subport_id;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_tmgr_port_find(p, name))
+ return NULL;
+
+ /*
+ * Resource
+ */
+
+ /* Is hierarchy frozen? */
+ if (p->soft.tm.hierarchy_frozen == 0)
+ return NULL;
+
+ /* Port */
+ sched = rte_sched_port_config(&t->port_params);
+ if (sched == NULL)
+ return NULL;
+
+ /* Subport */
+ n_subports = t->port_params.n_subports_per_port;
+ for (subport_id = 0; subport_id < n_subports; subport_id++) {
+ uint32_t n_pipes_per_subport = t->port_params.n_pipes_per_subport;
+ uint32_t pipe_id;
+ int status;
- params->soft.tm.qsize[i] =
- rte_align32pow2(params->soft.tm.qsize[i]);
+ status = rte_sched_subport_config(sched,
+ subport_id,
+ &t->subport_params[subport_id]);
+ if (status) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
+
+ /* Pipe */
+ for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
+ int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
+ int profile_id = t->pipe_to_profile[pos];
+
+ if (profile_id < 0)
+ continue;
+
+ status = rte_sched_pipe_config(sched,
+ subport_id,
+ pipe_id,
+ profile_id);
+ if (status) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
+ }
}
- /* enq_bsz, deq_bsz */
- if (params->soft.tm.enq_bsz == 0 ||
- params->soft.tm.deq_bsz == 0 ||
- params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
- return -EINVAL;
+ /* Node allocation */
+ tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
+ if (tmgr_port == NULL) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
- return 0;
+ /* Node fill in */
+ strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
+ tmgr_port->s = sched;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);
+
+ return tmgr_port;
}
-static void
+static struct rte_sched_port *
+SCHED(struct pmd_internals *p)
+{
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = softnic_tmgr_port_find(p, "TMGR");
+ if (tmgr_port == NULL)
+ return NULL;
+
+ return tmgr_port->s;
+}
+
+void
tm_hierarchy_init(struct pmd_internals *p)
{
- memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
+ memset(&p->soft.tm, 0, sizeof(p->soft.tm));
/* Initialize shaper profile list */
TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
@@ -77,8 +163,8 @@ tm_hierarchy_init(struct pmd_internals *p)
TAILQ_INIT(&p->soft.tm.h.nodes);
}
-static void
-tm_hierarchy_uninit(struct pmd_internals *p)
+void
+tm_hierarchy_free(struct pmd_internals *p)
{
/* Remove all nodes*/
for ( ; ; ) {
@@ -129,111 +215,7 @@ tm_hierarchy_uninit(struct pmd_internals *p)
free(shaper_profile);
}
- memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
-}
-
-int
-tm_init(struct pmd_internals *p,
- struct pmd_params *params,
- int numa_node)
-{
- uint32_t enq_bsz = params->soft.tm.enq_bsz;
- uint32_t deq_bsz = params->soft.tm.deq_bsz;
-
- p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
- 2 * enq_bsz * sizeof(struct rte_mbuf *),
- 0,
- numa_node);
-
- if (p->soft.tm.pkts_enq == NULL)
- return -ENOMEM;
-
- p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
- deq_bsz * sizeof(struct rte_mbuf *),
- 0,
- numa_node);
-
- if (p->soft.tm.pkts_deq == NULL) {
- rte_free(p->soft.tm.pkts_enq);
- return -ENOMEM;
- }
-
tm_hierarchy_init(p);
-
- return 0;
-}
-
-void
-tm_free(struct pmd_internals *p)
-{
- tm_hierarchy_uninit(p);
- rte_free(p->soft.tm.pkts_enq);
- rte_free(p->soft.tm.pkts_deq);
-}
-
-int
-tm_start(struct pmd_internals *p)
-{
- struct tm_params *t = &p->soft.tm.params;
- uint32_t n_subports, subport_id;
- int status;
-
- /* Is hierarchy frozen? */
- if (p->soft.tm.hierarchy_frozen == 0)
- return -1;
-
- /* Port */
- p->soft.tm.sched = rte_sched_port_config(&t->port_params);
- if (p->soft.tm.sched == NULL)
- return -1;
-
- /* Subport */
- n_subports = t->port_params.n_subports_per_port;
- for (subport_id = 0; subport_id < n_subports; subport_id++) {
- uint32_t n_pipes_per_subport =
- t->port_params.n_pipes_per_subport;
- uint32_t pipe_id;
-
- status = rte_sched_subport_config(p->soft.tm.sched,
- subport_id,
- &t->subport_params[subport_id]);
- if (status) {
- rte_sched_port_free(p->soft.tm.sched);
- return -1;
- }
-
- /* Pipe */
- n_pipes_per_subport = t->port_params.n_pipes_per_subport;
- for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
- int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
- pipe_id;
- int profile_id = t->pipe_to_profile[pos];
-
- if (profile_id < 0)
- continue;
-
- status = rte_sched_pipe_config(p->soft.tm.sched,
- subport_id,
- pipe_id,
- profile_id);
- if (status) {
- rte_sched_port_free(p->soft.tm.sched);
- return -1;
- }
- }
- }
-
- return 0;
-}
-
-void
-tm_stop(struct pmd_internals *p)
-{
- if (p->soft.tm.sched)
- rte_sched_port_free(p->soft.tm.sched);
-
- /* Unfreeze hierarchy */
- p->soft.tm.hierarchy_frozen = 0;
}
static struct tm_shaper_profile *
@@ -384,7 +366,7 @@ static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
struct pmd_internals *p = dev->data->dev_private;
- uint32_t n_queues_max = p->params.soft.tm.nb_queues;
+ uint32_t n_queues_max = p->params.tm.n_queues;
uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
uint32_t n_subports_max = n_pipes_max;
@@ -429,7 +411,7 @@ pmd_tm_node_type_get(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
- *is_leaf = node_id < p->params.soft.tm.nb_queues;
+ *is_leaf = node_id < p->params.tm.n_queues;
return 0;
}
@@ -1083,7 +1065,7 @@ update_subport_tc_rate(struct rte_eth_dev *dev,
subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
/* Update the subport configuration. */
- if (rte_sched_subport_config(p->soft.tm.sched,
+ if (rte_sched_subport_config(SCHED(p),
subport_id, &subport_params))
return -1;
@@ -1362,7 +1344,7 @@ node_add_check_port(struct rte_eth_dev *dev,
params->shaper_profile_id);
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1385,12 +1367,9 @@ node_add_check_port(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
- /* Shaper must be valid.
- * Shaper profile peak rate must fit the configured port rate.
- */
+ /* Shaper must be valid */
if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
- sp == NULL ||
- sp->params.peak.rate > p->params.soft.tm.rate)
+ sp == NULL)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
@@ -1437,7 +1416,7 @@ node_add_check_subport(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1509,7 +1488,7 @@ node_add_check_pipe(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1586,7 +1565,7 @@ node_add_check_tc(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1659,7 +1638,7 @@ node_add_check_queue(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: leaf */
- if (node_id >= p->params.soft.tm.nb_queues)
+ if (node_id >= p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -2548,10 +2527,10 @@ hierarchy_blueprints_create(struct rte_eth_dev *dev)
.n_subports_per_port = root->n_children,
.n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
- .qsize = {p->params.soft.tm.qsize[0],
- p->params.soft.tm.qsize[1],
- p->params.soft.tm.qsize[2],
- p->params.soft.tm.qsize[3],
+ .qsize = {p->params.tm.qsize[0],
+ p->params.tm.qsize[1],
+ p->params.tm.qsize[2],
+ p->params.tm.qsize[3],
},
.pipe_profiles = t->pipe_profiles,
.n_pipe_profiles = t->n_pipe_profiles,
@@ -2614,10 +2593,8 @@ pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
status = hierarchy_commit_check(dev, error);
if (status) {
- if (clear_on_fail) {
- tm_hierarchy_uninit(p);
- tm_hierarchy_init(p);
- }
+ if (clear_on_fail)
+ tm_hierarchy_free(p);
return status;
}
@@ -2659,7 +2636,7 @@ update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -2708,7 +2685,7 @@ update_queue_weight(struct rte_eth_dev *dev,
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -2841,7 +2818,7 @@ update_subport_rate(struct rte_eth_dev *dev,
subport_params.tb_size = sp->params.peak.size;
/* Update the subport configuration. */
- if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
+ if (rte_sched_subport_config(SCHED(p), subport_id,
&subport_params))
return -1;
@@ -2888,7 +2865,7 @@ update_pipe_rate(struct rte_eth_dev *dev,
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -2933,7 +2910,7 @@ update_tc_rate(struct rte_eth_dev *dev,
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -3068,8 +3045,7 @@ read_port_stats(struct rte_eth_dev *dev,
uint32_t tc_ov, id;
/* Stats read */
- int status = rte_sched_subport_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_subport_read_stats(SCHED(p),
subport_id,
&s,
&tc_ov);
@@ -3116,8 +3092,7 @@ read_subport_stats(struct rte_eth_dev *dev,
uint32_t tc_ov, tc_id;
/* Stats read */
- int status = rte_sched_subport_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_subport_read_stats(SCHED(p),
subport_id,
&s,
&tc_ov);
@@ -3177,8 +3152,7 @@ read_pipe_stats(struct rte_eth_dev *dev,
i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
@@ -3238,8 +3212,7 @@ read_tc_stats(struct rte_eth_dev *dev,
tc_id,
i);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
@@ -3298,8 +3271,7 @@ read_queue_stats(struct rte_eth_dev *dev,
tc_id,
queue_id);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
diff --git a/drivers/net/softnic/rte_pmd_eth_softnic_version.map b/drivers/net/softnic/rte_pmd_softnic_version.map
index fb2cb68c..bc44b06f 100644
--- a/drivers/net/softnic/rte_pmd_eth_softnic_version.map
+++ b/drivers/net/softnic/rte_pmd_softnic_version.map
@@ -5,3 +5,9 @@ DPDK_17.11 {
local: *;
};
+
+EXPERIMENTAL {
+ global:
+
+ rte_pmd_softnic_manage;
+};
diff --git a/drivers/net/szedata2/meson.build b/drivers/net/szedata2/meson.build
new file mode 100644
index 00000000..da373374
--- /dev/null
+++ b/drivers/net/szedata2/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+dep = cc.find_library('sze2', required: false)
+build = dep.found()
+ext_deps += dep
+sources = files('rte_eth_szedata2.c')
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 910c64d0..1d20cb51 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1056,7 +1056,8 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_rx_queues = internals->max_rx_queues;
dev_info->max_tx_queues = internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_CRC_STRIP;
dev_info->tx_offload_capa = 0;
dev_info->rx_queue_offload_capa = 0;
dev_info->tx_queue_offload_capa = 0;
@@ -1922,9 +1923,7 @@ RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
"* combo6core & combov3 & szedata2 & ( szedata2_cv3 | szedata2_cv3_fdt )");
-RTE_INIT(szedata2_init_log);
-static void
-szedata2_init_log(void)
+RTE_INIT(szedata2_init_log)
{
szedata2_logtype_init = rte_log_register("pmd.net.szedata2.init");
if (szedata2_logtype_init >= 0)
diff --git a/drivers/net/tap/Makefile b/drivers/net/tap/Makefile
index ccc5c5fc..32433653 100644
--- a/drivers/net/tap/Makefile
+++ b/drivers/net/tap/Makefile
@@ -24,7 +24,7 @@ CFLAGS += -I.
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
-LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_bus_vdev -lrte_gso
CFLAGS += -DTAP_MAX_QUEUES=$(TAP_MAX_QUEUES)
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 5531fe9d..feb92b48 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -17,6 +17,7 @@
#include <rte_ip.h>
#include <rte_string_fns.h>
+#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
@@ -55,6 +56,12 @@
#define ETH_TAP_CMP_MAC_FMT "0123456789ABCDEFabcdef"
#define ETH_TAP_MAC_ARG_FMT ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT
+#define TAP_GSO_MBUFS_PER_CORE 128
+#define TAP_GSO_MBUF_SEG_SIZE 128
+#define TAP_GSO_MBUF_CACHE_SIZE 4
+#define TAP_GSO_MBUFS_NUM \
+ (TAP_GSO_MBUFS_PER_CORE * TAP_GSO_MBUF_CACHE_SIZE)
+
static struct rte_vdev_driver pmd_tap_drv;
static struct rte_vdev_driver pmd_tun_drv;
@@ -65,7 +72,7 @@ static const char *valid_arguments[] = {
NULL
};
-static int tap_unit;
+static unsigned int tap_unit;
static unsigned int tun_unit;
static char tuntap_name[8];
@@ -412,12 +419,45 @@ tap_tx_offload_get_queue_capa(void)
return DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
}
+/* Finalize l4 checksum calculation */
static void
-tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
- unsigned int l3_len)
+tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
+ uint32_t l4_raw_cksum)
+{
+ if (l4_cksum) {
+ uint32_t cksum;
+
+ cksum = __rte_raw_cksum_reduce(l4_raw_cksum);
+ cksum += l4_phdr_cksum;
+
+ cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
+ cksum = (~cksum) & 0xffff;
+ if (cksum == 0)
+ cksum = 0xffff;
+ *l4_cksum = cksum;
+ }
+}
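
The fold-and-complement step in tap_tx_l4_cksum() is the standard one's-complement finalization; an editorial, self-contained restatement is below. The 0 -> 0xffff substitution matters for UDP, where an on-wire checksum of zero means "no checksum computed".

#include <stdint.h>

static uint16_t
fold_cksum_sketch(uint32_t sum, uint16_t phdr_cksum)
{
        sum += phdr_cksum;                                  /* add pseudo header */
        sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);  /* fold the carries */
        sum = (~sum) & 0xffff;                              /* one's complement */
        return (sum == 0) ? 0xffff : (uint16_t)sum;         /* 0 is reserved */
}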
+
+/* Accumulate L4 raw checksums */
+static void
+tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
+ uint32_t *l4_raw_cksum)
+{
+ if (l4_cksum == NULL)
+ return;
+
+ *l4_raw_cksum = __rte_raw_cksum(l4_data, l4_len, *l4_raw_cksum);
+}
+
+/* L3 and L4 pseudo headers checksum offloads */
+static void
+tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
+ unsigned int l3_len, unsigned int l4_len, uint16_t **l4_cksum,
+ uint16_t *l4_phdr_cksum, uint32_t *l4_raw_cksum)
{
void *l3_hdr = packet + l2_len;
@@ -430,69 +470,50 @@ tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
}
if (ol_flags & PKT_TX_L4_MASK) {
- uint16_t l4_len;
- uint32_t cksum;
- uint16_t *l4_cksum;
void *l4_hdr;
l4_hdr = packet + l2_len + l3_len;
if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
- l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
+ *l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
- l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
+ *l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
else
return;
- *l4_cksum = 0;
- if (ol_flags & PKT_TX_IPV4) {
- struct ipv4_hdr *iph = l3_hdr;
-
- l4_len = rte_be_to_cpu_16(iph->total_length) - l3_len;
- cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
- } else {
- struct ipv6_hdr *ip6h = l3_hdr;
-
- /* payload_len does not include ext headers */
- l4_len = rte_be_to_cpu_16(ip6h->payload_len) -
- l3_len + sizeof(struct ipv6_hdr);
- cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
- }
- cksum += rte_raw_cksum(l4_hdr, l4_len);
- cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
- cksum = (~cksum) & 0xffff;
- if (cksum == 0)
- cksum = 0xffff;
- *l4_cksum = cksum;
+ **l4_cksum = 0;
+ if (ol_flags & PKT_TX_IPV4)
+ *l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
+ else
+ *l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
+ *l4_raw_cksum = __rte_raw_cksum(l4_hdr, l4_len, 0);
}
}
-/* Callback to handle sending packets from the tap interface
- */
-static uint16_t
-pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+static inline void
+tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
+ struct rte_mbuf **pmbufs,
+ uint16_t *num_packets, unsigned long *num_tx_bytes)
{
- struct tx_queue *txq = queue;
- uint16_t num_tx = 0;
- unsigned long num_tx_bytes = 0;
- uint32_t max_size;
int i;
+ uint16_t l234_hlen;
- if (unlikely(nb_pkts == 0))
- return 0;
-
- max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
- for (i = 0; i < nb_pkts; i++) {
- struct rte_mbuf *mbuf = bufs[num_tx];
- struct iovec iovecs[mbuf->nb_segs + 1];
+ for (i = 0; i < num_mbufs; i++) {
+ struct rte_mbuf *mbuf = pmbufs[i];
+ struct iovec iovecs[mbuf->nb_segs + 2];
struct tun_pi pi = { .flags = 0, .proto = 0x00 };
struct rte_mbuf *seg = mbuf;
char m_copy[mbuf->data_len];
+ int proto;
int n;
int j;
-
- /* stats.errs will be incremented */
- if (rte_pktmbuf_pkt_len(mbuf) > max_size)
- break;
-
+ int k; /* current index in iovecs for copying segments */
+ uint16_t seg_len; /* length of first segment */
+ uint16_t nb_segs;
+ uint16_t *l4_cksum; /* l4 checksum (pseudo header + payload) */
+ uint32_t l4_raw_cksum = 0; /* TCP/UDP payload raw checksum */
+ uint16_t l4_phdr_cksum = 0; /* TCP/UDP pseudo header checksum */
+ uint16_t is_cksum = 0; /* set when cksum offload is requested */
+
+ l4_cksum = NULL;
if (txq->type == ETH_TUNTAP_TYPE_TUN) {
/*
* TUN and TAP are created with IFF_NO_PI disabled.
@@ -505,44 +526,163 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
* is 4 or 6, then protocol field is updated.
*/
char *buff_data = rte_pktmbuf_mtod(seg, void *);
- j = (*buff_data & 0xf0);
- pi.proto = (j == 0x40) ? rte_cpu_to_be_16(ETHER_TYPE_IPv4) :
- (j == 0x60) ? rte_cpu_to_be_16(ETHER_TYPE_IPv6) : 0x00;
+ proto = (*buff_data & 0xf0);
+ pi.proto = (proto == 0x40) ?
+ rte_cpu_to_be_16(ETHER_TYPE_IPv4) :
+ ((proto == 0x60) ?
+ rte_cpu_to_be_16(ETHER_TYPE_IPv6) :
+ 0x00);
}
- iovecs[0].iov_base = &pi;
- iovecs[0].iov_len = sizeof(pi);
- for (j = 1; j <= mbuf->nb_segs; j++) {
- iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
- iovecs[j].iov_base =
- rte_pktmbuf_mtod(seg, void *);
- seg = seg->next;
- }
+ k = 0;
+ iovecs[k].iov_base = &pi;
+ iovecs[k].iov_len = sizeof(pi);
+ k++;
+
+ nb_segs = mbuf->nb_segs;
if (txq->csum &&
((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
(mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
(mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
- /* Support only packets with all data in the same seg */
- if (mbuf->nb_segs > 1)
+ is_cksum = 1;
+
+ /* Support only packets whose l2, l3 and l4 headers
+ * are fully contained in the first segment
+ */
+ seg_len = rte_pktmbuf_data_len(mbuf);
+ l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ if (seg_len < l234_hlen)
break;
- /* To change checksums, work on a copy of data. */
+
+ /* To change checksums, work on a copy of the l2, l3
+ * headers + l4 pseudo header
+ */
rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
- rte_pktmbuf_data_len(mbuf));
- tap_tx_offload(m_copy, mbuf->ol_flags,
- mbuf->l2_len, mbuf->l3_len);
- iovecs[1].iov_base = m_copy;
+ l234_hlen);
+ tap_tx_l3_cksum(m_copy, mbuf->ol_flags,
+ mbuf->l2_len, mbuf->l3_len, mbuf->l4_len,
+ &l4_cksum, &l4_phdr_cksum,
+ &l4_raw_cksum);
+ iovecs[k].iov_base = m_copy;
+ iovecs[k].iov_len = l234_hlen;
+ k++;
+
+ /* Update next iovecs[] beyond l2, l3, l4 headers */
+ if (seg_len > l234_hlen) {
+ iovecs[k].iov_len = seg_len - l234_hlen;
+ iovecs[k].iov_base =
+ rte_pktmbuf_mtod(seg, char *) +
+ l234_hlen;
+ tap_tx_l4_add_rcksum(iovecs[k].iov_base,
+ iovecs[k].iov_len, l4_cksum,
+ &l4_raw_cksum);
+ k++;
+ nb_segs++;
+ }
+ seg = seg->next;
+ }
+
+ for (j = k; j <= nb_segs; j++) {
+ iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
+ iovecs[j].iov_base = rte_pktmbuf_mtod(seg, void *);
+ if (is_cksum)
+ tap_tx_l4_add_rcksum(iovecs[j].iov_base,
+ iovecs[j].iov_len, l4_cksum,
+ &l4_raw_cksum);
+ seg = seg->next;
}
+
+ if (is_cksum)
+ tap_tx_l4_cksum(l4_cksum, l4_phdr_cksum, l4_raw_cksum);
+
/* copy the tx frame data */
- n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
+ n = writev(txq->fd, iovecs, j);
if (n <= 0)
break;
+ (*num_packets)++;
+ (*num_tx_bytes) += rte_pktmbuf_pkt_len(mbuf);
+ }
+}
+/* Callback to handle sending packets from the tap interface
+ */
+static uint16_t
+pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct tx_queue *txq = queue;
+ uint16_t num_tx = 0;
+ uint16_t num_packets = 0;
+ unsigned long num_tx_bytes = 0;
+ uint32_t max_size;
+ int i;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ struct rte_mbuf *gso_mbufs[MAX_GSO_MBUFS];
+ max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf_in = bufs[num_tx];
+ struct rte_mbuf **mbuf;
+ uint16_t num_mbufs = 0;
+ uint16_t tso_segsz = 0;
+ int ret;
+ uint16_t hdrs_len;
+ int j;
+ uint64_t tso;
+
+ tso = mbuf_in->ol_flags & PKT_TX_TCP_SEG;
+ if (tso) {
+ struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;
+
+ assert(gso_ctx != NULL);
+
+ /* TCP segmentation implies TCP checksum offload */
+ mbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;
+
+ /* gso size is calculated without ETHER_CRC_LEN */
+ hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
+ mbuf_in->l4_len;
+ tso_segsz = mbuf_in->tso_segsz + hdrs_len;
+ if (unlikely(tso_segsz == hdrs_len) ||
+ tso_segsz > *txq->mtu) {
+ txq->stats.errs++;
+ break;
+ }
+ gso_ctx->gso_size = tso_segsz;
+ ret = rte_gso_segment(mbuf_in, /* packet to segment */
+ gso_ctx, /* gso control block */
+ (struct rte_mbuf **)&gso_mbufs, /* out mbufs */
+ RTE_DIM(gso_mbufs)); /* max tso mbufs */
+
+ /* ret contains the number of newly created mbufs */
+ if (ret < 0)
+ break;
+
+ mbuf = gso_mbufs;
+ num_mbufs = ret;
+ } else {
+ /* stats.errs will be incremented */
+ if (rte_pktmbuf_pkt_len(mbuf_in) > max_size)
+ break;
+
+ /* ret 0 indicates no new mbufs were created */
+ ret = 0;
+ mbuf = &mbuf_in;
+ num_mbufs = 1;
+ }
+
+ tap_write_mbufs(txq, num_mbufs, mbuf,
+ &num_packets, &num_tx_bytes);
num_tx++;
- num_tx_bytes += mbuf->pkt_len;
- rte_pktmbuf_free(mbuf);
+ /* free original mbuf */
+ rte_pktmbuf_free(mbuf_in);
+ /* free tso mbufs */
+ for (j = 0; j < ret; j++)
+ rte_pktmbuf_free(mbuf[j]);
}
- txq->stats.opackets += num_tx;
+ txq->stats.opackets += num_packets;
txq->stats.errs += nb_pkts - num_tx;
txq->stats.obytes += num_tx_bytes;
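
The TSO path above programs gso_ctx->gso_size per packet and lets rte_gso_segment() emit the segments into a caller-provided array; on success the return value is the segment count, and the driver frees both the original mbuf and every segment once they are written out. A hedged sketch of that call pattern, assuming the rte_gso API of this DPDK generation:

#include <rte_gso.h>

static int
segment_tso_sketch(struct rte_mbuf *mbuf_in, struct rte_gso_ctx *gso_ctx,
        uint16_t tso_segsz, struct rte_mbuf **segs, uint16_t max_segs)
{
        /* gso_size = MSS + L2/L3/L4 header length, set per packet */
        gso_ctx->gso_size = tso_segsz;

        /* Returns the number of mbufs written to segs[] (or < 0 on
         * error); the caller transmits them, then frees mbuf_in and
         * each entry of segs[].
         */
        return rte_gso_segment(mbuf_in, gso_ctx, segs, max_segs);
}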
@@ -641,12 +781,22 @@ tap_link_set_up(struct rte_eth_dev *dev)
static int
tap_dev_start(struct rte_eth_dev *dev)
{
- int err;
+ int err, i;
err = tap_intr_handle_set(dev, 1);
if (err)
return err;
- return tap_link_set_up(dev);
+
+ err = tap_link_set_up(dev);
+ if (err)
+ return err;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return err;
}
/* This function gets called when the current port gets stopped.
@@ -654,6 +804,13 @@ tap_dev_start(struct rte_eth_dev *dev)
static void
tap_dev_stop(struct rte_eth_dev *dev)
{
+ int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
tap_intr_handle_set(dev, 0);
tap_link_set_down(dev);
}
@@ -1004,31 +1161,75 @@ tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
}
static int
+tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
+{
+ uint32_t gso_types;
+ char pool_name[64];
+
+ /*
+ * Create a private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
+ * of data room per mbuf; use this pool for both direct and
+ * indirect mbufs
+ */
+
+ struct rte_mempool *mp; /* Mempool for GSO packets */
+
+ /* initialize GSO context */
+ gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+ snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
+ mp = rte_mempool_lookup((const char *)pool_name);
+ if (!mp) {
+ mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
+ TAP_GSO_MBUF_CACHE_SIZE, 0,
+ RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
+ SOCKET_ID_ANY);
+ if (!mp) {
+ struct pmd_internals *pmd = dev->data->dev_private;
+ RTE_LOG(DEBUG, PMD, "%s: failed to create mbuf pool for device %s\n",
+ pmd->name, dev->device->name);
+ return -1;
+ }
+ }
+
+ gso_ctx->direct_pool = mp;
+ gso_ctx->indirect_pool = mp;
+ gso_ctx->gso_types = gso_types;
+ gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
+ gso_ctx->flag = 0;
+
+ return 0;
+}
+
+static int
tap_setup_queue(struct rte_eth_dev *dev,
struct pmd_internals *internals,
uint16_t qid,
int is_rx)
{
+ int ret;
int *fd;
int *other_fd;
const char *dir;
struct pmd_internals *pmd = dev->data->dev_private;
struct rx_queue *rx = &internals->rxq[qid];
struct tx_queue *tx = &internals->txq[qid];
+ struct rte_gso_ctx *gso_ctx;
if (is_rx) {
fd = &rx->fd;
other_fd = &tx->fd;
dir = "rx";
+ gso_ctx = NULL;
} else {
fd = &tx->fd;
other_fd = &rx->fd;
dir = "tx";
+ gso_ctx = &tx->gso_ctx;
}
if (*fd != -1) {
/* fd for this queue already exists */
TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
pmd->name, *fd, dir, qid);
+ gso_ctx = NULL;
} else if (*other_fd != -1) {
/* Only other_fd exists. dup it */
*fd = dup(*other_fd);
@@ -1053,6 +1254,11 @@ tap_setup_queue(struct rte_eth_dev *dev,
tx->mtu = &dev->data->mtu;
rx->rxmode = &dev->data->dev_conf.rxmode;
+ if (gso_ctx) {
+ ret = tap_gso_ctx_setup(gso_ctx, dev);
+ if (ret)
+ return -1;
+ }
tx->type = pmd->type;
@@ -1342,6 +1548,37 @@ tap_rss_hash_update(struct rte_eth_dev *dev,
return 0;
}
+static int
+tap_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+tap_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+tap_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+tap_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
static const struct eth_dev_ops ops = {
.dev_start = tap_dev_start,
.dev_stop = tap_dev_stop,
@@ -1350,6 +1587,10 @@ static const struct eth_dev_ops ops = {
.dev_infos_get = tap_dev_info,
.rx_queue_setup = tap_rx_queue_setup,
.tx_queue_setup = tap_tx_queue_setup,
+ .rx_queue_start = tap_rx_queue_start,
+ .tx_queue_start = tap_tx_queue_start,
+ .rx_queue_stop = tap_rx_queue_stop,
+ .tx_queue_stop = tap_tx_queue_stop,
.rx_queue_release = tap_rx_queue_release,
.tx_queue_release = tap_tx_queue_release,
.flow_ctrl_get = tap_flow_ctrl_get,
@@ -1690,6 +1931,8 @@ rte_pmd_tun_probe(struct rte_vdev_device *dev)
return -1;
}
eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -1759,12 +2002,13 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
}
/* TODO: request info from primary to set up Rx and Tx */
eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
speed = ETH_SPEED_NUM_10G;
- snprintf(tap_name, sizeof(tap_name), "%s%d",
+ snprintf(tap_name, sizeof(tap_name), "%s%u",
DEFAULT_TAP_NAME, tap_unit++);
memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
@@ -1888,9 +2132,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_tap,
ETH_TAP_REMOTE_ARG "=<string>");
int tap_logtype;
-RTE_INIT(tap_init_log);
-static void
-tap_init_log(void)
+RTE_INIT(tap_init_log)
{
tap_logtype = rte_log_register("pmd.net.tap");
if (tap_logtype >= 0)
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
index 7b21d0d8..44e2773f 100644
--- a/drivers/net/tap/rte_eth_tap.h
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -15,6 +15,7 @@
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
+#include <rte_gso.h>
#include "tap_log.h"
#ifdef IFF_MULTI_QUEUE
@@ -22,6 +23,7 @@
#else
#define RTE_PMD_TAP_MAX_QUEUES 1
#endif
+#define MAX_GSO_MBUFS 64
enum rte_tuntap_type {
ETH_TUNTAP_TYPE_UNKNOWN,
@@ -59,6 +61,7 @@ struct tx_queue {
uint16_t *mtu; /* Pointer to MTU from dev_data */
uint16_t csum:1; /* Enable checksum offloading */
struct pkt_stats stats; /* Stats for this TX queue */
+ struct rte_gso_ctx gso_ctx; /* GSO context */
};
struct pmd_internals {
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index 6b60e6dc..0e01af62 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -537,7 +537,7 @@ tap_flow_create_eth(const struct rte_flow_item *item, void *data)
if (!flow)
return 0;
msg = &flow->msg;
- if (!is_zero_ether_addr(&spec->dst)) {
+ if (!is_zero_ether_addr(&mask->dst)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
&spec->dst.addr_bytes);
tap_nlattr_add(&msg->nh,
@@ -651,13 +651,13 @@ tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
info->eth_type = htons(ETH_P_IP);
if (!spec)
return 0;
- if (spec->hdr.dst_addr) {
+ if (mask->hdr.dst_addr) {
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
spec->hdr.dst_addr);
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
mask->hdr.dst_addr);
}
- if (spec->hdr.src_addr) {
+ if (mask->hdr.src_addr) {
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
spec->hdr.src_addr);
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
@@ -707,13 +707,13 @@ tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
info->eth_type = htons(ETH_P_IPV6);
if (!spec)
return 0;
- if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
+ if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
}
- if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
+ if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
@@ -762,10 +762,10 @@ tap_flow_create_udp(const struct rte_flow_item *item, void *data)
tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
if (!spec)
return 0;
- if (spec->hdr.dst_port & mask->hdr.dst_port)
+ if (mask->hdr.dst_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
spec->hdr.dst_port);
- if (spec->hdr.src_port & mask->hdr.src_port)
+ if (mask->hdr.src_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
spec->hdr.src_port);
return 0;
@@ -808,10 +808,10 @@ tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
if (!spec)
return 0;
- if (spec->hdr.dst_port & mask->hdr.dst_port)
+ if (mask->hdr.dst_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
spec->hdr.dst_port);
- if (spec->hdr.src_port & mask->hdr.src_port)
+ if (mask->hdr.src_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
spec->hdr.src_port);
return 0;
diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index ea8092ca..5b1abe20 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -699,6 +699,19 @@ nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
else
val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
+ nic->vlan_strip = enable;
+ nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
+}
+
+void
+nicvf_first_skip_config(struct nicvf *nic, uint8_t num_dwords)
+{
+ uint64_t val;
+
+ val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
+ val &= ~(0xfULL);
+ val |= (num_dwords & 0xf);
+
nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
index 284d0bdf..fd13ea84 100644
--- a/drivers/net/thunderx/base/nicvf_hw.h
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -193,6 +193,7 @@ uint32_t nicvf_qsize_sq_roundup(uint32_t val);
void nicvf_vlan_hw_strip(struct nicvf *nic, bool enable);
void nicvf_apad_config(struct nicvf *nic, bool enable);
+void nicvf_first_skip_config(struct nicvf *nic, uint8_t dwords);
int nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg);
int nicvf_rss_term(struct nicvf *nic);
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 99fcd516..a55c3ca6 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -34,6 +34,8 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_tailq.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
#include "base/nicvf_plat.h"
@@ -50,10 +52,10 @@ static void nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
bool cleanup);
+static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-RTE_INIT(nicvf_init_log);
-static void
-nicvf_init_log(void)
+RTE_INIT(nicvf_init_log)
{
nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
if (nicvf_logtype_mbox >= 0)
@@ -355,11 +357,9 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
}
memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
- if (dev->rx_pkt_burst == nicvf_recv_pkts ||
- dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
- return ptypes;
- return NULL;
+ /* All Ptypes are supported in all Rx functions. */
+ return ptypes;
}
static void
@@ -883,7 +883,7 @@ nicvf_dev_tx_queue_release(void *sq)
static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
- struct nicvf_txq *txq;
+ struct nicvf_txq *txq = NULL;
size_t i;
bool multiseg = false;
@@ -904,6 +904,9 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
dev->tx_pkt_burst = nicvf_xmit_pkts;
}
+ if (!txq)
+ return;
+
if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
else
@@ -913,13 +916,23 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
- if (dev->data->scattered_rx) {
- PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
- dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
- } else {
- PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
- dev->rx_pkt_burst = nicvf_recv_pkts;
- }
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ const eth_rx_burst_t rx_burst_func[2][2][2] = {
+ /* [NORMAL/SCATTER] [CKSUM/NO_CKSUM] [VLAN_STRIP/NO_VLAN_STRIP] */
+ [0][0][0] = nicvf_recv_pkts_no_offload,
+ [0][0][1] = nicvf_recv_pkts_vlan_strip,
+ [0][1][0] = nicvf_recv_pkts_cksum,
+ [0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
+ [1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
+ [1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
+ [1][1][0] = nicvf_recv_pkts_multiseg_cksum,
+ [1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
+ };
+
+ dev->rx_pkt_burst =
+ rx_burst_func[dev->data->scattered_rx]
+ [nic->offload_cksum][nic->vlan_strip];
}
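
The three-dimensional rx_burst_func table above is a branch-free dispatch idiom: each boolean device property contributes one index, so the exact burst variant is selected once at configure time instead of testing flags per packet. A self-contained editorial illustration with hypothetical functions:

typedef unsigned int (*burst_fn)(void);

static unsigned int rx_plain(void)      { return 0; }
static unsigned int rx_vlan(void)       { return 1; }
static unsigned int rx_cksum(void)      { return 2; }
static unsigned int rx_cksum_vlan(void) { return 3; }

static burst_fn
pick_rx_fn(int cksum, int vlan_strip)
{
        /* indexed [cksum][vlan_strip], 0 = off, 1 = on */
        static const burst_fn tbl[2][2] = {
                [0][0] = rx_plain,  [0][1] = rx_vlan,
                [1][0] = rx_cksum,  [1][1] = rx_cksum_vlan,
        };

        return tbl[!!cksum][!!vlan_strip];
}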
static int
@@ -1230,6 +1243,7 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
{
uintptr_t p;
struct rte_mbuf mb_def;
+ struct nicvf *nic = rxq->nic;
RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
@@ -1239,8 +1253,11 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
offsetof(struct rte_mbuf, data_off) != 4);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
offsetof(struct rte_mbuf, data_off) != 6);
+ RTE_BUILD_BUG_ON(offsetof(struct nicvf_rxq, rxq_fastpath_data_end) -
+ offsetof(struct nicvf_rxq,
+ rxq_fastpath_data_start) > 128);
mb_def.nb_segs = 1;
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes);
mb_def.port = rxq->port_id;
rte_mbuf_refcnt_set(&mb_def, 1);
@@ -1260,9 +1277,19 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
struct nicvf_rxq *rxq;
struct nicvf *nic = nicvf_pmd_priv(dev);
uint64_t offloads;
+ uint32_t buffsz;
+ struct rte_pktmbuf_pool_private *mbp_priv;
PMD_INIT_FUNC_TRACE();
+ /* First skip check */
+ mbp_priv = rte_mempool_get_priv(mp);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ if (buffsz < (uint32_t)(nic->skip_bytes)) {
+ PMD_INIT_LOG(ERR, "First skip is more than configured buffer size");
+ return -EINVAL;
+ }
+
if (qidx >= MAX_RCV_QUEUES_PER_QS)
nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
@@ -1298,6 +1325,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
return -EINVAL;
}
+
/* Check rx_free_thresh upper bound */
rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
rx_conf->rx_free_thresh :
@@ -1452,7 +1480,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
struct rte_mbuf *mbuf;
uint16_t rx_start, rx_end;
uint16_t tx_start, tx_end;
- bool vlan_strip;
+ int mask;
PMD_INIT_FUNC_TRACE();
@@ -1498,6 +1526,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
return -EINVAL;
}
rxq->mbuf_phys_off -= data_off;
+ rxq->mbuf_phys_off -= nic->skip_bytes;
if (mbuf_phys_off == 0)
mbuf_phys_off = rxq->mbuf_phys_off;
@@ -1572,9 +1601,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
/* Configure VLAN Strip */
- vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP);
- nicvf_vlan_hw_strip(nic, vlan_strip);
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = nicvf_vlan_offload_config(dev, mask);
/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
* to the 64bit memory address.
@@ -1727,7 +1756,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
return ret;
}
- /* Configure callbacks based on scatter mode */
+ /* Configure callbacks based on offloads */
nicvf_set_tx_function(dev);
nicvf_set_rx_function(dev);
@@ -1887,7 +1916,10 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+ /* KEEP_CRC offload flag is not supported by PMD;
+ * the block below can be removed once DEV_RX_OFFLOAD_CRC_STRIP is removed
+ */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
@@ -1943,6 +1975,9 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
}
}
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ nic->offload_cksum = 1;
+
PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
dev->data->port_id, nicvf_hw_cap(nic));
@@ -1962,6 +1997,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
.dev_infos_get = nicvf_dev_info_get,
.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
.mtu_set = nicvf_dev_set_mtu,
+ .vlan_offload_set = nicvf_vlan_offload_set,
.reta_update = nicvf_dev_reta_update,
.reta_query = nicvf_dev_reta_query,
.rss_hash_update = nicvf_dev_rss_hash_update,
@@ -1979,6 +2015,83 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
};
static int
+nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ nicvf_vlan_hw_strip(nic, true);
+ else
+ nicvf_vlan_hw_strip(nic, false);
+ }
+
+ return 0;
+}
+
+static int
+nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ nicvf_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
+static inline int
+nicvf_set_first_skip(struct rte_eth_dev *dev)
+{
+ int bytes_to_skip = 0;
+ int ret = 0;
+ unsigned int i;
+ struct rte_kvargs *kvlist;
+ static const char *const skip[] = {
+ SKIP_DATA_BYTES,
+ NULL};
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (!dev->device->devargs) {
+ nicvf_first_skip_config(nic, 0);
+ return ret;
+ }
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, skip);
+ if (!kvlist)
+ return -EINVAL;
+
+ if (kvlist->count == 0)
+ goto exit;
+
+ for (i = 0; i != kvlist->count; ++i) {
+ const struct rte_kvargs_pair *pair = &kvlist->pairs[i];
+
+ if (!strcmp(pair->key, SKIP_DATA_BYTES))
+ bytes_to_skip = atoi(pair->value);
+ }
+
+ /* 128 bytes amounts to one cache line */
+ if (bytes_to_skip >= 0 && bytes_to_skip < 128) {
+ if (!(bytes_to_skip % 8)) {
+ nicvf_first_skip_config(nic, (bytes_to_skip / 8));
+ nic->skip_bytes = bytes_to_skip;
+ goto kvlist_free;
+ } else {
+ PMD_INIT_LOG(ERR, "skip_data_bytes should be a multiple of 8");
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ PMD_INIT_LOG(ERR, "skip_data_bytes should be less than 128");
+ ret = -EINVAL;
+ goto exit;
+ }
+exit:
+ nicvf_first_skip_config(nic, 0);
+kvlist_free:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
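
The constraints enforced above follow from the register layout: nicvf_first_skip_config() stores the skip as a 4-bit count of 8-byte dwords, so the byte value must be a non-negative multiple of 8 below 128. An editorial restatement of that validation:

#include <stdint.h>

static int
skip_bytes_to_dwords(int bytes, uint8_t *dwords)
{
        /* 4-bit hardware field of 8-byte units: 0, 8, ..., 120 */
        if (bytes < 0 || bytes >= 128 || (bytes % 8) != 0)
                return -1;

        *dwords = (uint8_t)(bytes / 8);
        return 0;
}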
+static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
int ret;
@@ -2087,6 +2200,11 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
goto malloc_fail;
}
+ ret = nicvf_set_first_skip(eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure first skip");
+ goto malloc_fail;
+ }
PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
eth_dev->data->port_id, nic->vendor_id, nic->device_id,
nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
@@ -2159,3 +2277,4 @@ static struct rte_pci_driver rte_nicvf_pmd = {
RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_thunderx, SKIP_DATA_BYTES "=<int>");
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index ea8dccd1..ae440fef 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -38,6 +38,7 @@
DEV_TX_OFFLOAD_MULTI_SEGS)
#define NICVF_RX_OFFLOAD_CAPA ( \
+ DEV_RX_OFFLOAD_CHECKSUM | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_CRC_STRIP | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
@@ -51,6 +52,7 @@
#define VLAN_TAG_SIZE 4 /* 802.3ac tag */
+#define SKIP_DATA_BYTES "skip_data_bytes"
static inline struct nicvf *
nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
{
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 72305d9d..247c3568 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -162,12 +162,14 @@ nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
free_desc -= TX_DESC_PER_PKT;
}
- sq->tail = tail;
- sq->xmit_bufs += i;
- rte_wmb();
+ if (likely(i)) {
+ sq->tail = tail;
+ sq->xmit_bufs += i;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ }
return i;
}
@@ -218,12 +220,14 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
}
}
- sq->tail = tail;
- sq->xmit_bufs += used_bufs;
- rte_wmb();
+ if (likely(used_desc)) {
+ sq->tail = tail;
+ sq->xmit_bufs += used_bufs;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, used_desc);
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, used_desc);
+ }
return i;
}
@@ -327,6 +331,20 @@ nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
}
+static inline uint64_t __hot
+nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
+{
+ static const uint64_t flag_table[3] __rte_cache_aligned = {
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ };
+
+ const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
+ (cqe_rx_w0.err_opcode == CQE_RX_ERR_IP_CHK);
+ return flag_table[idx];
+}
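
The index computed in nicvf_set_olflags() can only be 0, 1, or 2, since a CQE reports at most one of the two checksum error opcodes; that is exactly why a three-entry table suffices. An editorial restatement with hypothetical opcode values:

#include <assert.h>
#include <stdint.h>

enum { ERR_IP_CHK = 1, ERR_L4_CHK = 2 };    /* hypothetical values */

static uint8_t
cksum_flag_idx(uint8_t err_opcode)
{
        /* 0: IP and L4 good, 1: IP cksum bad, 2: L4 cksum bad */
        uint8_t idx = (uint8_t)(((err_opcode == ERR_L4_CHK) << 1) |
                                (err_opcode == ERR_IP_CHK));

        assert(idx < 3);
        return idx;
}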
+
static inline int __hot
nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
{
@@ -385,11 +403,13 @@ nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
if (likely(cqe_rx_w0.rss_alg)) {
pkt->hash.rss = cqe_rx_w2.rss_tag;
pkt->ol_flags |= PKT_RX_RSS_HASH;
+
}
}
-uint16_t __hot
-nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+static __rte_always_inline uint16_t
+nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ const uint32_t flag)
{
uint32_t i, to_process;
struct cqe_rx_t *cqe_rx;
@@ -420,7 +440,19 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
- pkt->ol_flags = 0;
+
+ if (flag & NICVF_RX_OFFLOAD_NONE)
+ pkt->ol_flags = 0;
+ if (flag & NICVF_RX_OFFLOAD_CKSUM)
+ pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+ if (unlikely(cqe_rx_w0.vlan_stripped)) {
+ pkt->ol_flags |= PKT_RX_VLAN
+ | PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci =
+ rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+ }
+ }
pkt->data_len = cqe_rx_w3.rb0_sz;
pkt->pkt_len = cqe_rx_w3.rb0_sz;
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -445,11 +477,43 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return to_process;
}
-static inline uint16_t __hot
+uint16_t __hot
+nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+static __rte_always_inline uint16_t __hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
uint64_t mbuf_phys_off,
struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
- uint64_t mbuf_init)
+ uint64_t mbuf_init, const uint32_t flag)
{
struct rte_mbuf *pkt, *seg, *prev;
cqe_rx_word0_t cqe_rx_w0;
@@ -467,12 +531,22 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
- pkt->ol_flags = 0;
pkt->pkt_len = cqe_rx_w1.pkt_len;
pkt->data_len = rb_sz[nicvf_frag_num(0)];
nicvf_mbuff_init_mseg_update(
pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_NONE)
+ pkt->ol_flags = 0;
+ if (flag & NICVF_RX_OFFLOAD_CKSUM)
+ pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+ if (unlikely(cqe_rx_w0.vlan_stripped)) {
+ pkt->ol_flags |= PKT_RX_VLAN
+ | PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+ }
+ }
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
*rx_pkt = pkt;
@@ -491,9 +565,9 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
return nb_segs;
}
-uint16_t __hot
+static __rte_always_inline uint16_t __hot
nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+ uint16_t nb_pkts, const uint32_t flag)
{
union cq_entry_t *cq_entry;
struct cqe_rx_t *cqe_rx;
@@ -515,7 +589,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
cq_entry = &desc[cqe_head];
cqe_rx = (struct cqe_rx_t *)cq_entry;
nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
- rx_pkts + i, rbptr_offset, mbuf_init);
+ rx_pkts + i, rbptr_offset, mbuf_init, flag);
buffers_consumed += nb_segs;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -535,6 +609,38 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
return to_process;
}
+uint16_t __hot
+nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
uint32_t
nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
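
The checksum hunks above replace per-packet branches with a table lookup: two mutually exclusive equality tests form a 2-bit index that can only take the values 0 to 2. A self-contained sketch of the same trick, with constants renamed and simplified rather than taken from the driver:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the CQE error opcodes and mbuf flag bits. */
enum { CQE_ERR_IP_CHK = 1, CQE_ERR_L4_CHK = 2 };
enum { IP_GOOD = 1, IP_BAD = 2, L4_GOOD = 4, L4_BAD = 8 };

static uint64_t
set_olflags(unsigned int err_opcode)
{
	static const uint64_t flag_table[3] = {
		IP_GOOD | L4_GOOD,	/* idx 0: no checksum error  */
		IP_BAD,			/* idx 1: IP checksum failed */
		IP_GOOD | L4_BAD,	/* idx 2: L4 checksum failed */
	};
	/* The two predicates are mutually exclusive, so idx is 0, 1 or 2. */
	unsigned int idx = (err_opcode == CQE_ERR_L4_CHK) << 1 |
			   (err_opcode == CQE_ERR_IP_CHK);

	return flag_table[idx];
}

int
main(void)
{
	printf("%" PRIx64 " %" PRIx64 " %" PRIx64 "\n", set_olflags(0),
	       set_olflags(CQE_ERR_IP_CHK), set_olflags(CQE_ERR_L4_CHK));
	return 0;
}
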
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 8bdd582e..a39808cb 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -8,6 +8,10 @@
#include <rte_byteorder.h>
#include <rte_ethdev_driver.h>
+#define NICVF_RX_OFFLOAD_NONE 0x1
+#define NICVF_RX_OFFLOAD_CKSUM 0x2
+#define NICVF_RX_OFFLOAD_VLAN_STRIP 0x4
+
#define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
#ifndef __hot
@@ -86,9 +90,23 @@ nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
-uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
-uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t pkts);
+uint16_t nicvf_recv_pkts_cksum(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t pkts);
+uint16_t nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
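
Behind these per-combination prototypes sits a single always-inline worker taking a compile-time-constant flag; each thin wrapper passes a different constant, so the compiler folds the flag tests and drops the untaken offload branches from every specialization. A generic sketch of the pattern, with invented names:

#include <stdint.h>

#define OFF_NONE  0x1
#define OFF_CKSUM 0x2

static inline __attribute__((always_inline)) uint16_t
rx_worker(uint16_t n, const uint32_t flag)
{
	uint16_t done = n;

	if (flag & OFF_CKSUM) {
		/* checksum handling; folded away when flag lacks the bit */
	}
	return done;
}

uint16_t
rx_no_offload(uint16_t n)
{
	return rx_worker(n, OFF_NONE);
}

uint16_t
rx_cksum(uint16_t n)
{
	return rx_worker(n, OFF_CKSUM);
}
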
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index d4a83c37..dd52f38e 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -55,25 +55,27 @@ union mbuf_initializer {
};
struct nicvf_rxq {
+ MARKER rxq_fastpath_data_start;
+ uint8_t rbptr_offset;
+ uint16_t rx_free_thresh;
+ uint32_t head;
+ uint32_t qlen_mask;
+ int32_t recv_buffers;
+ int32_t available_space;
uint64_t mbuf_phys_off;
uintptr_t cq_status;
uintptr_t cq_door;
- union mbuf_initializer mbuf_initializer;
- nicvf_iova_addr_t phys;
- union cq_entry_t *desc;
struct nicvf_rbdr *shared_rbdr;
- struct nicvf *nic;
struct rte_mempool *pool;
- uint32_t head;
- uint32_t qlen_mask;
- int32_t available_space;
- int32_t recv_buffers;
- uint16_t rx_free_thresh;
- uint16_t queue_id;
- uint16_t precharge_cnt;
+ union cq_entry_t *desc;
+ union mbuf_initializer mbuf_initializer;
+ MARKER rxq_fastpath_data_end;
uint8_t rx_drop_en;
+ uint16_t precharge_cnt;
uint16_t port_id;
- uint8_t rbptr_offset;
+ uint16_t queue_id;
+ struct nicvf *nic;
+ nicvf_iova_addr_t phys;
} __rte_cache_aligned;
struct nicvf {
@@ -85,6 +87,8 @@ struct nicvf {
bool loopback_supported;
bool pf_acked:1;
bool pf_nacked:1;
+ bool offload_cksum:1;
+ bool vlan_strip:1;
uint64_t hwcap;
uint8_t link_up;
uint8_t duplex;
@@ -99,6 +103,7 @@ struct nicvf {
struct rte_intr_handle intr_handle;
uint8_t cpi_alg;
uint16_t mtu;
+ int skip_bytes;
bool vlan_filter_en;
uint8_t mac_addr[ETHER_ADDR_LEN];
/* secondary queue set support */
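
The nicvf_rxq reshuffle above packs everything the Rx burst loop reads between two zero-size MARKER labels, so the hot fields share as few cache lines as possible while slow-path fields trail behind the end marker. A minimal sketch of the idiom, assuming the MARKER typedef from rte_mbuf.h and GCC's zero-length-array extension:

#include <stdint.h>

typedef void *MARKER[0];	/* zero-size label, as in rte_mbuf.h */

struct rxq_sketch {
	MARKER fastpath_start;	/* fields below are read per packet */
	uint32_t head;
	uint32_t qlen_mask;
	int32_t available_space;
	MARKER fastpath_end;	/* fields below are slow path only  */
	uint16_t port_id;
	uint16_t queue_id;
} __attribute__((aligned(64)));
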
diff --git a/drivers/net/vhost/meson.build b/drivers/net/vhost/meson.build
new file mode 100644
index 00000000..9b067c35
--- /dev/null
+++ b/drivers/net/vhost/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+build = dpdk_conf.has('RTE_LIBRTE_VHOST')
+version = 2
+sources = files('rte_eth_vhost.c')
+install_headers('rte_eth_vhost.h')
+deps += 'vhost'
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index ba9d768a..e58f3221 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -1070,7 +1070,8 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -1353,6 +1354,7 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
}
/* TODO: request info from primary to set up Rx and Tx */
eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -1456,9 +1458,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
"iface=<ifc> "
"queues=<int>");
-RTE_INIT(vhost_init_log);
-static void
-vhost_init_log(void)
+RTE_INIT(vhost_init_log)
{
vhost_logtype = rte_log_register("pmd.net.vhost");
if (vhost_logtype >= 0)
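
This file, like several others below, migrates to the reworked RTE_INIT macro, which now expands to the constructor attribute plus the function header so the body follows directly. The change is purely syntactic; foo_init here is a placeholder name:

/* old style: register, then define the function separately */
RTE_INIT(foo_init);
static void
foo_init(void)
{
	/* one-time setup */
}

/* new style: RTE_INIT supplies the declaration, the body follows */
RTE_INIT(foo_init)
{
	/* one-time setup */
}
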
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index df50a571..614357da 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1320,6 +1320,11 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (hw->use_inorder_rx) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using inorder mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
PMD_INIT_LOG(INFO,
"virtio: using mergeable buffer Rx path on port %u",
@@ -1331,10 +1336,10 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
- if (hw->use_simple_tx) {
- PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
+ if (hw->use_inorder_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
} else {
PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
eth_dev->data->port_id);
@@ -1781,9 +1786,7 @@ static struct rte_pci_driver rte_virtio_pmd = {
.remove = eth_virtio_pci_remove,
};
-RTE_INIT(rte_virtio_pmd_init);
-static void
-rte_virtio_pmd_init(void)
+RTE_INIT(rte_virtio_pmd_init)
{
if (rte_eal_iopl_init() != 0) {
PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
@@ -1793,6 +1796,22 @@ rte_virtio_pmd_init(void)
rte_pci_register(&rte_virtio_pmd);
}
+static bool
+rx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
+}
+
+static bool
+tx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
+}
+
/*
* Configure virtio device
* It returns 0 on success.
@@ -1801,8 +1820,10 @@ static int
virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
struct virtio_hw *hw = dev->data->dev_private;
uint64_t rx_offloads = rxmode->offloads;
+ uint64_t tx_offloads = txmode->offloads;
uint64_t req_features;
int ret;
@@ -1824,6 +1845,15 @@ virtio_dev_configure(struct rte_eth_dev *dev)
(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
+ if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM))
+ req_features |= (1ULL << VIRTIO_NET_F_CSUM);
+
+ if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+ req_features |=
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6);
+
/* if request features changed, reinit the device */
if (req_features != hw->req_guest_features) {
ret = virtio_init_device(dev, req_features);
@@ -1861,6 +1891,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return -ENOTSUP;
}
+ hw->has_tx_offload = tx_offload_enabled(hw);
+ hw->has_rx_offload = rx_offload_enabled(hw);
+
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
/* Enable vector (0) for Link State Interrupt */
if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
@@ -1872,21 +1905,30 @@ virtio_dev_configure(struct rte_eth_dev *dev)
rte_spinlock_init(&hw->state_lock);
hw->use_simple_rx = 1;
- hw->use_simple_tx = 1;
+
+ if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
+ hw->use_inorder_tx = 1;
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ hw->use_inorder_rx = 1;
+ hw->use_simple_rx = 0;
+ } else {
+ hw->use_inorder_rx = 0;
+ }
+ }
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
hw->use_simple_rx = 0;
- hw->use_simple_tx = 0;
}
#endif
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- hw->use_simple_rx = 0;
- hw->use_simple_tx = 0;
+ hw->use_simple_rx = 0;
}
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM))
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_VLAN_STRIP))
hw->use_simple_rx = 0;
return 0;
@@ -2122,12 +2164,10 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
- };
host_features = VTPCI_OPS(hw)->get_features(hw);
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_CRC_STRIP;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |
@@ -2142,14 +2182,14 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT;
- if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
+ if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
dev_info->tx_offload_capa |=
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
}
tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
(1ULL << VIRTIO_NET_F_HOST_TSO6);
- if ((hw->guest_features & tso_mask) == tso_mask)
+ if ((host_features & tso_mask) == tso_mask)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
}
@@ -2168,9 +2208,7 @@ RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(virtio_init_log);
-static void
-virtio_init_log(void)
+RTE_INIT(virtio_init_log)
{
virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
if (virtio_logtype_init >= 0)
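
Path selection in virtio_dev_configure() now keys off negotiated features: IN_ORDER enables the in-order Tx path unconditionally, while the in-order Rx path additionally requires mergeable buffers, which in turn rule out the simple Rx path. A small decision helper restating that logic (illustrative names; the NEON and Rx-offload vetoes are omitted):

#include <stdbool.h>

struct hw_sketch {
	bool use_simple_rx;
	bool use_inorder_rx;
	bool use_inorder_tx;
};

static void
select_paths(struct hw_sketch *hw, bool in_order, bool mrg_rxbuf)
{
	hw->use_simple_rx = true;
	hw->use_inorder_rx = false;
	hw->use_inorder_tx = false;

	if (in_order) {
		hw->use_inorder_tx = true;	/* Tx needs only IN_ORDER  */
		hw->use_inorder_rx = mrg_rxbuf;	/* Rx also needs MRG_RXBUF */
	}
	if (mrg_rxbuf)		/* mergeable buffers veto the simple Rx path */
		hw->use_simple_rx = false;
}
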
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index bb40064e..b726ad10 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -28,14 +28,12 @@
1u << VIRTIO_NET_F_CTRL_VQ | \
1u << VIRTIO_NET_F_CTRL_RX | \
1u << VIRTIO_NET_F_CTRL_VLAN | \
- 1u << VIRTIO_NET_F_CSUM | \
- 1u << VIRTIO_NET_F_HOST_TSO4 | \
- 1u << VIRTIO_NET_F_HOST_TSO6 | \
1u << VIRTIO_NET_F_MRG_RXBUF | \
1u << VIRTIO_NET_F_MTU | \
1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE | \
1u << VIRTIO_RING_F_INDIRECT_DESC | \
1ULL << VIRTIO_F_VERSION_1 | \
+ 1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_IOMMU_PLATFORM)
#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
@@ -44,13 +42,6 @@
1u << VIRTIO_NET_F_GUEST_TSO4 | \
1u << VIRTIO_NET_F_GUEST_TSO6)
-#define VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS \
- (DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_VLAN_STRIP)
-
/*
* CQ function prototype
*/
@@ -83,9 +74,15 @@ uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index a28ba833..58fdd3d4 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -6,6 +6,7 @@
#define _VIRTIO_PCI_H_
#include <stdint.h>
+#include <stdbool.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -121,6 +122,12 @@ struct virtnet_ctl;
#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END 34
+/*
+ * Inorder feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER 35
+
/* The Guest publishes the used index for which it expects an interrupt
* at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
@@ -232,7 +239,10 @@ struct virtio_hw {
uint8_t use_msix;
uint8_t modern;
uint8_t use_simple_rx;
- uint8_t use_simple_tx;
+ uint8_t use_inorder_rx;
+ uint8_t use_inorder_tx;
+ bool has_tx_offload;
+ bool has_rx_offload;
uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
uint32_t notify_off_multiplier;
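
The IN_ORDER guarantee defined above is what makes the in-order datapaths in the next file cheap: since buffers complete in avail order, the used-ring entry index doubles as the descriptor index, and a whole run of descriptors is reclaimed with a single tail update, which is exactly what vq_ring_free_inorder() does. A rough sketch, assuming a power-of-two ring size:

#include <stdint.h>

struct vq_sketch {
	uint16_t nentries;	/* power of two */
	uint16_t free_cnt;
	uint16_t desc_tail_idx;
};

/* Reclaim 'num' in-order descriptors, the last of which is desc_idx. */
static void
ring_free_inorder(struct vq_sketch *vq, uint16_t desc_idx, uint16_t num)
{
	vq->free_cnt += num;
	vq->desc_tail_idx = desc_idx & (vq->nentries - 1);
}
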
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 92fab217..eb891433 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -48,6 +48,13 @@ virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
}
void
+vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
+{
+ vq->vq_free_cnt += num;
+ vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
+}
+
+void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp, *dp_tail;
@@ -115,6 +122,44 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
return i;
}
+static uint16_t
+virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t *len,
+ uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_mbuf *cookie;
+ uint16_t used_idx = 0;
+ uint16_t i;
+
+ if (unlikely(num == 0))
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ /* Desc idx same as used idx */
+ uep = &vq->vq_ring.used->ring[used_idx];
+ len[i] = uep->len;
+ cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
+
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ rte_prefetch0(cookie);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+ rx_pkts[i] = cookie;
+ vq->vq_used_cons_idx++;
+ vq->vq_descx[used_idx].cookie = NULL;
+ }
+
+ vq_ring_free_inorder(vq, used_idx, i);
+ return i;
+}
+
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
@@ -143,6 +188,83 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
}
}
+/* Cleanup from completed inorder transmits. */
+static void
+virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
+{
+ uint16_t i, used_idx, desc_idx = 0, last_idx;
+ int16_t free_cnt = 0;
+ struct vq_desc_extra *dxp = NULL;
+
+ if (unlikely(num == 0))
+ return;
+
+ for (i = 0; i < num; i++) {
+ struct vring_used_elem *uep;
+
+ used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_used_cons_idx++;
+
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ }
+
+ last_idx = desc_idx + dxp->ndescs - 1;
+ free_cnt = last_idx - vq->vq_desc_tail_idx;
+ if (free_cnt <= 0)
+ free_cnt += vq->vq_nentries;
+
+ vq_ring_free_inorder(vq, last_idx, free_cnt);
+}
+
+static inline int
+virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
+ struct rte_mbuf **cookies,
+ uint16_t num)
+{
+ struct vq_desc_extra *dxp;
+ struct virtio_hw *hw = vq->hw;
+ struct vring_desc *start_dp;
+ uint16_t head_idx, idx, i = 0;
+
+ if (unlikely(vq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(vq->vq_free_cnt < num))
+ return -EMSGSIZE;
+
+ head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
+ start_dp = vq->vq_ring.desc;
+
+ while (i < num) {
+ idx = head_idx & (vq->vq_nentries - 1);
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookies[i];
+ dxp->ndescs = 1;
+
+ start_dp[idx].addr =
+ VIRTIO_MBUF_ADDR(cookies[i], vq) +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ start_dp[idx].len =
+ cookies[i]->buf_len -
+ RTE_PKTMBUF_HEADROOM +
+ hw->vtnet_hdr_size;
+ start_dp[idx].flags = VRING_DESC_F_WRITE;
+
+ vq_update_avail_ring(vq, idx);
+ head_idx++;
+ i++;
+ }
+
+ vq->vq_desc_head_idx += num;
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ return 0;
+}
+
static inline int
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
@@ -225,13 +347,6 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
}
}
-static inline int
-tx_offload_enabled(struct virtio_hw *hw)
-{
- return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
-}
/* avoid write operation when necessary, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do { \
@@ -240,8 +355,111 @@ tx_offload_enabled(struct virtio_hw *hw)
} while (0)
static inline void
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
+ struct rte_mbuf *cookie,
+ bool offload)
+{
+ if (offload) {
+ if (cookie->ol_flags & PKT_TX_TCP_SEG)
+ cookie->ol_flags |= PKT_TX_TCP_CKSUM;
+
+ switch (cookie->ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct udp_hdr,
+ dgram_cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ case PKT_TX_TCP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ default:
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ break;
+ }
+
+ /* TCP Segmentation Offload */
+ if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+ virtio_tso_fix_cksum(cookie);
+ hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+ VIRTIO_NET_HDR_GSO_TCPV6 :
+ VIRTIO_NET_HDR_GSO_TCPV4;
+ hdr->gso_size = cookie->tso_segsz;
+ hdr->hdr_len =
+ cookie->l2_len +
+ cookie->l3_len +
+ cookie->l4_len;
+ } else {
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+ }
+}
+
+static inline void
+virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
+ struct rte_mbuf **cookies,
+ uint16_t num)
+{
+ struct vq_desc_extra *dxp;
+ struct virtqueue *vq = txvq->vq;
+ struct vring_desc *start_dp;
+ struct virtio_net_hdr *hdr;
+ uint16_t idx;
+ uint16_t head_size = vq->hw->vtnet_hdr_size;
+ uint16_t i = 0;
+
+ idx = vq->vq_desc_head_idx;
+ start_dp = vq->vq_ring.desc;
+
+ while (i < num) {
+ idx = idx & (vq->vq_nentries - 1);
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookies[i];
+ dxp->ndescs = 1;
+
+ hdr = (struct virtio_net_hdr *)
+ rte_pktmbuf_prepend(cookies[i], head_size);
+ cookies[i]->pkt_len -= head_size;
+
+ /* if offload is disabled, the header is not zeroed below; do it now */
+ if (!vq->hw->has_tx_offload) {
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+
+ virtqueue_xmit_offload(hdr, cookies[i],
+ vq->hw->has_tx_offload);
+
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
+ start_dp[idx].len = cookies[i]->data_len;
+ start_dp[idx].flags = 0;
+
+ vq_update_avail_ring(vq, idx);
+
+ idx++;
+ i++;
+ }
+
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
+}
+
+static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
- uint16_t needed, int use_indirect, int can_push)
+ uint16_t needed, int use_indirect, int can_push,
+ int in_order)
{
struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
struct vq_desc_extra *dxp;
@@ -251,9 +469,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t head_idx, idx;
uint16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
- int offload;
- offload = tx_offload_enabled(vq->hw);
head_idx = vq->vq_desc_head_idx;
idx = head_idx;
dxp = &vq->vq_descx[idx];
@@ -270,8 +486,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
* which is wrong. Below subtract restores correct pkt size.
*/
cookie->pkt_len -= head_size;
+
/* if offload is disabled, the header is not zeroed below; do it now */
- if (offload == 0) {
+ if (!vq->hw->has_tx_offload) {
ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
@@ -308,49 +525,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
idx = start_dp[idx].next;
}
- /* Checksum Offload / TSO */
- if (offload) {
- if (cookie->ol_flags & PKT_TX_TCP_SEG)
- cookie->ol_flags |= PKT_TX_TCP_CKSUM;
-
- switch (cookie->ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct udp_hdr,
- dgram_cksum);
- hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- break;
-
- case PKT_TX_TCP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
- hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- break;
-
- default:
- ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
- ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
- ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
- break;
- }
-
- /* TCP Segmentation Offload */
- if (cookie->ol_flags & PKT_TX_TCP_SEG) {
- virtio_tso_fix_cksum(cookie);
- hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
- VIRTIO_NET_HDR_GSO_TCPV6 :
- VIRTIO_NET_HDR_GSO_TCPV4;
- hdr->gso_size = cookie->tso_segsz;
- hdr->hdr_len =
- cookie->l2_len +
- cookie->l3_len +
- cookie->l4_len;
- } else {
- ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
- ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
- ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
- }
- }
+ virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
do {
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
@@ -362,11 +537,15 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
if (use_indirect)
idx = vq->vq_ring.desc[head_idx].next;
- vq->vq_desc_head_idx = idx;
- if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- vq->vq_desc_tail_idx = idx;
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+
+ vq->vq_desc_head_idx = idx;
vq_update_avail_ring(vq, head_idx);
+
+ if (!in_order) {
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = idx;
+ }
}
void
@@ -421,7 +600,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
struct virtnet_rx *rxvq = &vq->rxq;
struct rte_mbuf *m;
uint16_t desc_idx;
- int error, nbufs;
+ int error, nbufs, i;
PMD_INIT_FUNC_TRACE();
@@ -451,6 +630,25 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
virtio_rxq_rearm_vec(rxvq);
nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
}
+ } else if (hw->use_inorder_rx) {
+ if (!virtqueue_full(vq)) {
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *pkts[free_cnt];
+
+ if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
+ free_cnt)) {
+ error = virtqueue_enqueue_refill_inorder(vq,
+ pkts,
+ free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
+ }
+
+ nbufs += free_cnt;
+ vq_update_avail_idx(vq);
+ }
} else {
while (!virtqueue_full(vq)) {
m = rte_mbuf_raw_alloc(rxvq->mpool);
@@ -498,10 +696,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- /* cannot use simple rxtx funcs with multisegs or offloads */
- if (dev->data->dev_conf.txmode.offloads)
- hw->use_simple_tx = 0;
-
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
@@ -536,31 +730,11 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
struct virtio_hw *hw = dev->data->dev_private;
struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
- uint16_t mid_idx = vq->vq_nentries >> 1;
- struct virtnet_tx *txvq = &vq->txq;
- uint16_t desc_idx;
PMD_INIT_FUNC_TRACE();
- if (hw->use_simple_tx) {
- for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
- vq->vq_ring.avail->ring[desc_idx] =
- desc_idx + mid_idx;
- vq->vq_ring.desc[desc_idx + mid_idx].next =
- desc_idx;
- vq->vq_ring.desc[desc_idx + mid_idx].addr =
- txvq->virtio_net_hdr_mem +
- offsetof(struct virtio_tx_region, tx_hdr);
- vq->vq_ring.desc[desc_idx + mid_idx].len =
- vq->hw->vtnet_hdr_size;
- vq->vq_ring.desc[desc_idx + mid_idx].flags =
- VRING_DESC_F_NEXT;
- vq->vq_ring.desc[desc_idx].flags = 0;
- }
- for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
- desc_idx++)
- vq->vq_ring.avail->ring[desc_idx] = desc_idx;
- }
+ if (hw->use_inorder_tx)
+ vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
VIRTQUEUE_DUMP(vq);
@@ -576,6 +750,19 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
* successful since it was just dequeued.
*/
error = virtqueue_enqueue_recv_refill(vq, m);
+
+ if (unlikely(error)) {
+ RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+ rte_pktmbuf_free(m);
+ }
+}
+
+static void
+virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
+{
+ int error;
+
+ error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
if (unlikely(error)) {
RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
rte_pktmbuf_free(m);
@@ -614,6 +801,15 @@ virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
}
}
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+ VIRTIO_DUMP_PACKET(m, m->data_len);
+
+ rxvq->stats.bytes += m->pkt_len;
+ virtio_update_packet_stats(&rxvq->stats, m);
+}
+
/* Optionally fill offload information in structure */
static int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
@@ -686,14 +882,6 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
return 0;
}
-static inline int
-rx_offload_enabled(struct virtio_hw *hw)
-{
- return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
-}
-
#define VIRTIO_MBUF_BURST_SZ 64
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
@@ -709,7 +897,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
int error;
uint32_t i, nb_enqueued;
uint32_t hdr_size;
- int offload;
struct virtio_net_hdr *hdr;
nb_rx = 0;
@@ -731,7 +918,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
nb_enqueued = 0;
hdr_size = hw->vtnet_hdr_size;
- offload = rx_offload_enabled(hw);
for (i = 0; i < num ; i++) {
rxm = rcv_pkts[i];
@@ -760,24 +946,20 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (hw->vlan_strip)
rte_vlan_strip(rxm);
- if (offload && virtio_rx_offload(rxm, hdr) < 0) {
+ if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.errors++;
continue;
}
- VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
+ virtio_rx_stats_updated(rxvq, rxm);
rx_pkts[nb_rx++] = rxm;
-
- rxvq->stats.bytes += rxm->pkt_len;
- virtio_update_packet_stats(&rxvq->stats, rxm);
}
rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
- error = ENOSPC;
while (likely(!virtqueue_full(vq))) {
new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
@@ -807,6 +989,193 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
uint16_t
+virtio_recv_mergeable_pkts_inorder(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *prev;
+ uint16_t nb_used, num, nb_rx;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ int error;
+ uint32_t nb_enqueued;
+ uint32_t seg_num;
+ uint32_t seg_res;
+ uint32_t hdr_size;
+ int32_t i;
+
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+ nb_used = RTE_MIN(nb_used, nb_pkts);
+ nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
+
+ virtio_rmb();
+
+ PMD_RX_LOG(DEBUG, "used:%d", nb_used);
+
+ nb_enqueued = 0;
+ seg_num = 1;
+ seg_res = 0;
+ hdr_size = hw->vtnet_hdr_size;
+
+ num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
+
+ for (i = 0; i < num; i++) {
+ struct virtio_net_hdr_mrg_rxbuf *header;
+
+ PMD_RX_LOG(DEBUG, "dequeue:%d", num);
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ rxm = rcv_pkts[i];
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf_inorder(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ header = (struct virtio_net_hdr_mrg_rxbuf *)
+ ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
+ - hdr_size);
+ seg_num = header->num_buffers;
+
+ if (seg_num == 0)
+ seg_num = 1;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->nb_segs = seg_num;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ rxm->port = rxvq->port_id;
+
+ rx_pkts[nb_rx] = rxm;
+ prev = rxm;
+
+ if (vq->hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
+ virtio_discard_rxbuf_inorder(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rx_pkts[nb_rx]);
+
+ seg_res = seg_num - 1;
+
+ /* Merge remaining segments */
+ while (seg_res != 0 && i < (num - 1)) {
+ i++;
+
+ rxm = rcv_pkts[i];
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[i]);
+ rxm->data_len = (uint16_t)(len[i]);
+
+ rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
+ rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
+
+ if (prev)
+ prev->next = rxm;
+
+ prev = rxm;
+ seg_res -= 1;
+ }
+
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ }
+
+ /* Last packet may still need its remaining segments merged */
+ while (seg_res != 0) {
+ uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
+ VIRTIO_MBUF_BURST_SZ);
+
+ prev = rcv_pkts[nb_rx];
+ if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
+ rcv_cnt);
+ uint16_t extra_idx = 0;
+
+ rcv_cnt = num;
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
+ rxm->data_off =
+ RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
+ prev->next = rxm;
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+ rx_pkts[nb_rx]->data_len += len[extra_idx];
+ extra_idx += 1;
+ }
+ seg_res -= rcv_cnt;
+
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.");
+ virtio_discard_rxbuf_inorder(vq, prev);
+ rxvq->stats.errors++;
+ break;
+ }
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+
+ if (likely(!virtqueue_full(vq))) {
+ /* free_cnt may include mrg descs */
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *new_pkts[free_cnt];
+
+ if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
+ error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
+ free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(new_pkts[i]);
+ }
+ nb_enqueued += free_cnt;
+ } else {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed += free_cnt;
+ }
+ }
+
+ if (likely(nb_enqueued)) {
+ vq_update_avail_idx(vq);
+
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
+uint16_t
virtio_recv_mergeable_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
@@ -825,7 +1194,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t extra_idx;
uint32_t seg_res;
uint32_t hdr_size;
- int offload;
nb_rx = 0;
if (unlikely(hw->started == 0))
@@ -843,7 +1211,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
extra_idx = 0;
seg_res = 0;
hdr_size = hw->vtnet_hdr_size;
- offload = rx_offload_enabled(hw);
while (i < nb_used) {
struct virtio_net_hdr_mrg_rxbuf *header;
@@ -888,7 +1255,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rx_pkts[nb_rx] = rxm;
prev = rxm;
- if (offload && virtio_rx_offload(rxm, &header->hdr) < 0) {
+ if (hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.errors++;
continue;
@@ -950,7 +1318,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
- error = ENOSPC;
while (likely(!virtqueue_full(vq))) {
new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
@@ -1053,12 +1420,124 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Enqueue Packet buffers */
- virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push);
+ virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
+ can_push, 0);
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ }
+
+ txvq->stats.packets += nb_tx;
+
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(vq);
+
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
+
+uint16_t
+virtio_xmit_pkts_inorder(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_tx *txvq = tx_queue;
+ struct virtqueue *vq = txvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ uint16_t hdr_size = hw->vtnet_hdr_size;
+ uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+ struct rte_mbuf *inorder_pkts[nb_pkts];
+ int error;
+
+ if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
+ return nb_tx;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+
+ VIRTQUEUE_DUMP(vq);
+ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ virtio_rmb();
+ if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
+ virtio_xmit_cleanup_inorder(vq, nb_used);
+
+ if (unlikely(!vq->vq_free_cnt))
+ virtio_xmit_cleanup_inorder(vq, nb_used);
+
+ nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
+
+ for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ int slots, need;
+
+ /* Do VLAN tag insertion */
+ if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&txm);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(txm);
+ continue;
+ }
+ }
+
+ /* optimize ring usage */
+ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ rte_mbuf_refcnt_read(txm) == 1 &&
+ RTE_MBUF_DIRECT(txm) &&
+ txm->nb_segs == 1 &&
+ rte_pktmbuf_headroom(txm) >= hdr_size &&
+ rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+ __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
+ inorder_pkts[nb_inorder_pkts] = txm;
+ nb_inorder_pkts++;
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ continue;
+ }
+
+ if (nb_inorder_pkts) {
+ virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
+ nb_inorder_pkts);
+ nb_inorder_pkts = 0;
+ }
+
+ slots = txm->nb_segs + 1;
+ need = slots - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ nb_used = VIRTQUEUE_NUSED(vq);
+ virtio_rmb();
+ need = RTE_MIN(need, (int)nb_used);
+
+ virtio_xmit_cleanup_inorder(vq, need);
+
+ need = slots - vq->vq_free_cnt;
+
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ break;
+ }
+ }
+ /* Enqueue Packet buffers */
+ virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
txvq->stats.bytes += txm->pkt_len;
virtio_update_packet_stats(&txvq->stats, txm);
}
+ /* Transmit all inorder packets */
+ if (nb_inorder_pkts)
+ virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
+ nb_inorder_pkts);
+
txvq->stats.packets += nb_tx;
if (likely(nb_tx)) {
@@ -1070,5 +1549,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
}
+ VIRTQUEUE_DUMP(vq);
+
return nb_tx;
}
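
Worth noting in virtio_xmit_pkts_inorder() above: mbufs that can absorb the virtio-net header in their own headroom are batched through the single-descriptor in-order enqueue, and everything else falls back to the chained path. That eligibility test, restated as a standalone predicate (the helper itself is illustrative; the mbuf accessors are real DPDK API, and the driver additionally checks header alignment plus the ANY_LAYOUT/VERSION_1 features):

#include <stdbool.h>
#include <rte_mbuf.h>

static bool
can_xmit_inorder(struct rte_mbuf *m, uint16_t hdr_size)
{
	return rte_mbuf_refcnt_read(m) == 1 &&	    /* exclusively owned   */
	       RTE_MBUF_DIRECT(m) &&		    /* not indirect/extbuf */
	       m->nb_segs == 1 &&		    /* single segment      */
	       rte_pktmbuf_headroom(m) >= hdr_size; /* room for the header */
}
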
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index 51520758..31e565b4 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -27,73 +27,6 @@
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
-uint16_t
-virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
-{
- struct virtnet_tx *txvq = tx_queue;
- struct virtqueue *vq = txvq->vq;
- struct virtio_hw *hw = vq->hw;
- uint16_t nb_used;
- uint16_t desc_idx;
- struct vring_desc *start_dp;
- uint16_t nb_tail, nb_commit;
- int i;
- uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
- uint16_t nb_tx = 0;
-
- if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
- return nb_tx;
-
- nb_used = VIRTQUEUE_NUSED(vq);
- rte_compiler_barrier();
-
- if (nb_used >= VIRTIO_TX_FREE_THRESH)
- virtio_xmit_cleanup_simple(vq);
-
- nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
- desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
- start_dp = vq->vq_ring.desc;
- nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx);
-
- if (nb_commit >= nb_tail) {
- for (i = 0; i < nb_tail; i++)
- vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
- for (i = 0; i < nb_tail; i++) {
- start_dp[desc_idx].addr =
- VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
- start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
- tx_pkts++;
- desc_idx++;
- }
- nb_commit -= nb_tail;
- desc_idx = 0;
- }
- for (i = 0; i < nb_commit; i++)
- vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
- for (i = 0; i < nb_commit; i++) {
- start_dp[desc_idx].addr =
- VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
- start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
- tx_pkts++;
- desc_idx++;
- }
-
- rte_compiler_barrier();
-
- vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
- vq->vq_avail_idx += nb_pkts;
- vq->vq_ring.avail->idx = vq->vq_avail_idx;
- txvq->stats.packets += nb_pkts;
-
- if (likely(nb_pkts)) {
- if (unlikely(virtqueue_kick_prepare(vq)))
- virtqueue_notify(vq);
- }
-
- return nb_pkts;
-}
-
int __attribute__((cold))
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index 303904d6..dc97e4cc 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -55,53 +55,4 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
vq_update_avail_idx(vq);
}
-#define VIRTIO_TX_FREE_THRESH 32
-#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
-#define VIRTIO_TX_FREE_NR 32
-/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
-static inline void
-virtio_xmit_cleanup_simple(struct virtqueue *vq)
-{
- uint16_t i, desc_idx;
- uint32_t nb_free = 0;
- struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];
-
- desc_idx = (uint16_t)(vq->vq_used_cons_idx &
- ((vq->vq_nentries >> 1) - 1));
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = rte_pktmbuf_prefree_seg(m);
- if (likely(m != NULL)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = rte_pktmbuf_prefree_seg(m);
- if (likely(m != NULL)) {
- if (likely(m->pool == free[0]->pool))
- free[nb_free++] = m;
- else {
- rte_mempool_put_bulk(free[0]->pool,
- (void **)free,
- RTE_MIN(RTE_DIM(free),
- nb_free));
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free,
- RTE_MIN(RTE_DIM(free), nb_free));
- } else {
- for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = rte_pktmbuf_prefree_seg(m);
- if (m != NULL)
- rte_mempool_put(m->pool, m);
- }
- }
-
- vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
- vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
-}
-
#endif /* _VIRTIO_RXTX_SIMPLE_H_ */
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 4322527f..7df600b0 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -371,11 +371,13 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
1ULL << VIRTIO_NET_F_GUEST_CSUM | \
1ULL << VIRTIO_NET_F_GUEST_TSO4 | \
1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
+ 1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_VERSION_1)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
- int cq, int queue_size, const char *mac, char **ifname)
+ int cq, int queue_size, const char *mac, char **ifname,
+ int mrg_rxbuf, int in_order)
{
pthread_mutex_init(&dev->mutex, NULL);
snprintf(dev->path, PATH_MAX, "%s", path);
@@ -384,6 +386,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
dev->mac_specified = 0;
+ dev->unsupported_features = 0;
parse_mac(dev, mac);
if (*ifname) {
@@ -419,10 +422,22 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
}
- if (dev->mac_specified)
+ if (!mrg_rxbuf) {
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MRG_RXBUF);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);
+ }
+
+ if (!in_order) {
+ dev->device_features &= ~(1ull << VIRTIO_F_IN_ORDER);
+ dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
+ }
+
+ if (dev->mac_specified) {
dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
- else
+ } else {
dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
+ }
if (cq) {
/* device does not really need to know anything about CQ,
@@ -437,6 +452,14 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
+ dev->unsupported_features |=
+ (1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
+ dev->unsupported_features |=
+ (1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
}
/* The backend will not report this feature, we add it explicitly */
@@ -444,6 +467,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features |= (1ull << VIRTIO_NET_F_STATUS);
dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
+ dev->unsupported_features |= ~VIRTIO_USER_SUPPORTED_FEATURES;
if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
virtio_user_mem_event_cb, dev)) {
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index d2d4cb82..d6e0e137 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -33,6 +33,7 @@ struct virtio_user_dev {
* and will be sync with device
*/
uint64_t device_features; /* supported features by device */
+ uint64_t unsupported_features; /* unsupported features mask */
uint8_t status;
uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
@@ -47,7 +48,8 @@ int is_vhost_user_by_type(const char *path);
int virtio_user_start_device(struct virtio_user_dev *dev);
int virtio_user_stop_device(struct virtio_user_dev *dev);
int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
- int cq, int queue_size, const char *mac, char **ifname);
+ int cq, int queue_size, const char *mac, char **ifname,
+ int mrg_rxbuf, int in_order);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
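
The unsupported_features mask added here records every feature bit the PMD disabled at init, so the server-mode reconnect in the next file can re-apply the whole policy with one mask instead of re-listing bits. A condensed sketch of the pattern (structure and helpers invented for illustration):

#include <stdint.h>

struct dev_sketch {
	uint64_t device_features;
	uint64_t unsupported_features;
};

/* At init: disabling a feature also records it as unsupported. */
static void
disable_feature(struct dev_sketch *dev, unsigned int bit)
{
	dev->device_features &= ~(1ULL << bit);
	dev->unsupported_features |= (1ULL << bit);
}

/* On reconnect: one mask restores the negotiated policy. */
static void
reconnect(struct dev_sketch *dev, uint64_t backend_features)
{
	dev->device_features = backend_features & ~dev->unsupported_features;
}
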
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 1c102ca7..525d16ca 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -30,7 +30,6 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
int ret;
int flag;
int connectfd;
- uint64_t features = dev->device_features;
struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
connectfd = accept(dev->listenfd, NULL, NULL);
@@ -45,15 +44,8 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
return -1;
}
- features &= ~dev->device_features;
- /* For following bits, vhost-user doesn't really need to know */
- features &= ~(1ull << VIRTIO_NET_F_MAC);
- features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
- features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
- features &= ~(1ull << VIRTIO_NET_F_STATUS);
- if (features)
- PMD_INIT_LOG(ERR, "WARNING: Some features 0x%" PRIx64 " are not supported by vhost-user!",
- features);
+ /* mask out features the vhost-user backend does not support */
+ dev->device_features &= ~(dev->unsupported_features);
dev->features &= dev->device_features;
@@ -366,8 +358,12 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
VIRTIO_USER_ARG_INTERFACE_NAME,
-#define VIRTIO_USER_ARG_SERVER_MODE "server"
+#define VIRTIO_USER_ARG_SERVER_MODE "server"
VIRTIO_USER_ARG_SERVER_MODE,
+#define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf"
+ VIRTIO_USER_ARG_MRG_RXBUF,
+#define VIRTIO_USER_ARG_IN_ORDER "in_order"
+ VIRTIO_USER_ARG_IN_ORDER,
NULL
};
@@ -440,7 +436,8 @@ virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
hw->use_msix = 1;
hw->modern = 0;
hw->use_simple_rx = 0;
- hw->use_simple_tx = 0;
+ hw->use_inorder_rx = 0;
+ hw->use_inorder_tx = 0;
hw->virtio_user_dev = dev;
return eth_dev;
}
@@ -470,6 +467,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
+ uint64_t mrg_rxbuf = 1;
+ uint64_t in_order = 1;
char *path = NULL;
char *ifname = NULL;
char *mac_addr = NULL;
@@ -569,6 +568,24 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
goto end;
}
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
+ &get_integer_arg, &mrg_rxbuf) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_MRG_RXBUF);
+ goto end;
+ }
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
+ &get_integer_arg, &in_order) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_IN_ORDER);
+ goto end;
+ }
+ }
+
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
struct virtio_user_dev *vu_dev;
@@ -585,7 +602,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
else
vu_dev->is_server = false;
if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
- queue_size, mac_addr, &ifname) < 0) {
+ queue_size, mac_addr, &ifname, mrg_rxbuf,
+ in_order) < 0) {
PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
virtio_user_eth_dev_free(eth_dev);
goto end;
@@ -663,4 +681,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
"cq=<int> "
"queue_size=<int> "
"queues=<int> "
- "iface=<string>");
+ "iface=<string> "
+ "server=<0|1> "
+ "mrg_rxbuf=<0|1> "
+ "in_order=<0|1>");
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index a7d0a9cb..56a77cc7 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -74,6 +74,14 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
desc_idx = used_idx;
rte_pktmbuf_free(vq->sw_ring[desc_idx]);
vq->vq_free_cnt++;
+ } else if (hw->use_inorder_rx) {
+ desc_idx = (uint16_t)uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq_ring_free_inorder(vq, desc_idx, 1);
} else {
desc_idx = (uint16_t)uep->id;
dxp = &vq->vq_descx[desc_idx];
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 14364f35..26518ed9 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -306,6 +306,8 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
+ uint16_t num);
static inline void
vq_update_avail_idx(struct virtqueue *vq)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index ba932ff2..2613cd13 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -57,7 +57,8 @@
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_JUMBO_FRAME)
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CRC_STRIP)
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
@@ -1053,7 +1054,6 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev __rte_unused,
dev_info->speed_capa = ETH_LINK_SPEED_10G;
dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
- dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -1320,9 +1320,7 @@ RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(vmxnet3_init_log);
-static void
-vmxnet3_init_log(void)
+RTE_INIT(vmxnet3_init_log)
{
vmxnet3_logtype_init = rte_log_register("pmd.net.vmxnet3.init");
if (vmxnet3_logtype_init >= 0)
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
index 095a34b2..469960a3 100644
--- a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -289,10 +289,7 @@ static struct rte_vdev_driver dpaa2_cmdif_drv = {
RTE_PMD_REGISTER_VDEV(DPAA2_CMDIF_PMD_NAME, dpaa2_cmdif_drv);
-RTE_INIT(dpaa2_cmdif_init_log);
-
-static void
-dpaa2_cmdif_init_log(void)
+RTE_INIT(dpaa2_cmdif_init_log)
{
dpaa2_cmdif_logtype = rte_log_register("pmd.raw.dpaa2.cmdif");
if (dpaa2_cmdif_logtype >= 0)
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
index 598a621c..8991e832 100644
--- a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
@@ -19,7 +19,7 @@ extern int dpaa2_cmdif_logtype;
rte_log(RTE_LOG_DEBUG, dpaa2_cmdif_logtype, "dpaa2_cmdif: %s(): " \
fmt "\n", __func__, ## args)
-#define DPAA2_CMDIF_FUNC_TRACE() DPAA2_CMDIF_LOG(DEBUG, ">>")
+#define DPAA2_CMDIF_FUNC_TRACE() DPAA2_CMDIF_DEBUG(">>")
#define DPAA2_CMDIF_INFO(fmt, args...) \
DPAA2_CMDIF_LOG(INFO, fmt, ## args)
diff --git a/drivers/raw/dpaa2_cmdif/meson.build b/drivers/raw/dpaa2_cmdif/meson.build
index 8c909438..1d146872 100644
--- a/drivers/raw/dpaa2_cmdif/meson.build
+++ b/drivers/raw/dpaa2_cmdif/meson.build
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
deps += ['rawdev', 'mempool_dpaa2', 'bus_vdev']
sources = files('dpaa2_cmdif.c')
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
index 1d15c302..2787d302 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -985,6 +985,7 @@ rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
}
static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
+ .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
.drv_type = DPAA2_QDMA,
.probe = rte_dpaa2_qdma_probe,
.remove = rte_dpaa2_qdma_remove,
@@ -992,9 +993,7 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
-RTE_INIT(dpaa2_qdma_init_log);
-static void
-dpaa2_qdma_init_log(void)
+RTE_INIT(dpaa2_qdma_init_log)
{
dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
if (dpaa2_qdma_logtype >= 0)
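The new .drv_flags line marks the QDMA PMD as usable only when I/O virtual addresses equal process virtual addresses, letting the fslmc bus select (or reject) the IOVA mode accordingly. A hedged registration sketch, with hypothetical my_probe/my_remove callbacks matching the rte_dpaa2_probe_t/rte_dpaa2_remove_t signatures from rte_fslmc.h:

    #include <rte_fslmc.h>

    static int my_probe(struct rte_dpaa2_driver *drv,
    		    struct rte_dpaa2_device *dev);
    static int my_remove(struct rte_dpaa2_device *dev);

    static struct rte_dpaa2_driver my_dpaa2_pmd = {
    	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA, /* require iova == va */
    	.drv_type  = DPAA2_QDMA,
    	.probe     = my_probe,
    	.remove    = my_remove,
    };
    RTE_PMD_REGISTER_DPAA2(my_dpaa2, my_dpaa2_pmd);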
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
index fafe352b..4779e4ce 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
@@ -19,7 +19,7 @@ extern int dpaa2_qdma_logtype;
rte_log(RTE_LOG_DEBUG, dpaa2_qdma_logtype, "dpaa2_qdma: %s(): " \
fmt "\n", __func__, ## args)
-#define DPAA2_QDMA_FUNC_TRACE() DPAA2_QDMA_LOG(DEBUG, ">>")
+#define DPAA2_QDMA_FUNC_TRACE() DPAA2_QDMA_DEBUG(">>")
#define DPAA2_QDMA_INFO(fmt, args...) \
DPAA2_QDMA_LOG(INFO, fmt, ## args)
diff --git a/drivers/raw/dpaa2_qdma/meson.build b/drivers/raw/dpaa2_qdma/meson.build
index a2eb1d2f..b6a081f1 100644
--- a/drivers/raw/dpaa2_qdma/meson.build
+++ b/drivers/raw/dpaa2_qdma/meson.build
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
deps += ['rawdev', 'mempool_dpaa2', 'ring']
sources = files('dpaa2_qdma.c')
diff --git a/drivers/raw/ifpga_rawdev/ifpga_rawdev.c b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
index 030ed1b6..3fed0578 100644
--- a/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
+++ b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
@@ -517,9 +517,7 @@ RTE_PMD_REGISTER_PCI(ifpga_rawdev_pci_driver, rte_ifpga_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(ifpga_rawdev_pci_driver, rte_ifpga_rawdev_pmd);
RTE_PMD_REGISTER_KMOD_DEP(ifpga_rawdev_pci_driver, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(ifpga_rawdev_init_log);
-static void
-ifpga_rawdev_init_log(void)
+RTE_INIT(ifpga_rawdev_init_log)
{
ifpga_rawdev_logtype = rte_log_register("driver.raw.init");
if (ifpga_rawdev_logtype >= 0)
diff --git a/drivers/raw/skeleton_rawdev/Makefile b/drivers/raw/skeleton_rawdev/Makefile
index bacc66dd..3f97c2ee 100644
--- a/drivers/raw/skeleton_rawdev/Makefile
+++ b/drivers/raw/skeleton_rawdev/Makefile
@@ -8,7 +8,6 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_skeleton_rawdev.a
-CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal
diff --git a/drivers/raw/skeleton_rawdev/meson.build b/drivers/raw/skeleton_rawdev/meson.build
index 7cb2d3fb..b4a6ed08 100644
--- a/drivers/raw/skeleton_rawdev/meson.build
+++ b/drivers/raw/skeleton_rawdev/meson.build
@@ -4,5 +4,3 @@
deps += ['rawdev', 'kvargs', 'mbuf', 'bus_vdev']
sources = files('skeleton_rawdev.c',
'skeleton_rawdev_test.c')
-
-allow_experimental_apis = true
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
index 6bdbbb50..6518a2d9 100644
--- a/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
@@ -305,6 +305,18 @@ static int skeleton_rawdev_queue_release(struct rte_rawdev *dev,
return ret;
}
+static uint16_t skeleton_rawdev_queue_count(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ return skeldev->num_queues;
+}
+
static int skeleton_rawdev_get_attr(struct rte_rawdev *dev,
const char *attr_name,
uint64_t *attr_value)
@@ -524,6 +536,7 @@ static const struct rte_rawdev_ops skeleton_rawdev_ops = {
.queue_def_conf = skeleton_rawdev_queue_def_conf,
.queue_setup = skeleton_rawdev_queue_setup,
.queue_release = skeleton_rawdev_queue_release,
+ .queue_count = skeleton_rawdev_queue_count,
.attr_get = skeleton_rawdev_get_attr,
.attr_set = skeleton_rawdev_set_attr,
@@ -744,10 +757,7 @@ static struct rte_vdev_driver skeleton_pmd_drv = {
RTE_PMD_REGISTER_VDEV(SKELETON_PMD_RAWDEV_NAME, skeleton_pmd_drv);
-RTE_INIT(skeleton_pmd_init_log);
-
-static void
-skeleton_pmd_init_log(void)
+RTE_INIT(skeleton_pmd_init_log)
{
skeleton_pmd_logtype = rte_log_register("rawdev.skeleton");
if (skeleton_pmd_logtype >= 0)
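The queue_count op added above is what backs the public rte_rawdev_queue_count() accessor exercised by the test below. One wrinkle worth noting: the op returns uint16_t, so the -EINVAL produced by RTE_FUNC_PTR_OR_ERR_RET() on a NULL dev wraps to a large positive count rather than an error code. A minimal usage sketch from the application side:

    #include <stdio.h>
    #include <rte_rawdev.h>

    /* Ask a configured rawdev how many queues it currently exposes;
     * dev_id would come from rte_rawdev_get_dev_id() or similar.
     */
    static void
    print_queue_count(uint16_t dev_id)
    {
    	uint16_t q_count = rte_rawdev_queue_count(dev_id);

    	printf("rawdev %u exposes %u queue(s)\n", dev_id, q_count);
    }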
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
index 3eb5c3a7..3405b898 100644
--- a/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
@@ -194,6 +194,18 @@ test_rawdev_queue_default_conf_get(void)
}
static int
+test_rawdev_queue_count(void)
+{
+ unsigned int q_count;
+
+ /* Get the current configuration */
+ q_count = rte_rawdev_queue_count(TEST_DEV_ID);
+ RTE_TEST_ASSERT_EQUAL(q_count, 1, "Invalid queue count (%u)", q_count);
+
+ return TEST_SUCCESS;
+}
+
+static int
test_rawdev_queue_setup(void)
{
int ret;
@@ -429,6 +441,7 @@ test_rawdev_skeldev(void)
SKELDEV_TEST_RUN(test_rawdev_configure, NULL,
test_rawdev_queue_default_conf_get);
SKELDEV_TEST_RUN(test_rawdev_configure, NULL, test_rawdev_queue_setup);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_queue_count);
SKELDEV_TEST_RUN(test_rawdev_queue_setup, NULL,
test_rawdev_queue_release);
SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_attr_set_get);
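The new case runs with a NULL setup and teardown, following the SKELDEV_TEST_RUN(setup, teardown, test) convention of the neighbouring entries. A hedged sketch of what such a runner macro typically looks like (the real one in skeleton_rawdev_test.c is not shown in this hunk and may differ):

    /* Assumed: passed/failed counters in scope and TEST_SUCCESS == 0. */
    #define MY_TEST_RUN(setup, teardown, test) do {		\
    	int _ret = TEST_SUCCESS;				\
    	if ((setup) != NULL)					\
    		_ret = (setup)();	/* optional fixture */	\
    	if (_ret == TEST_SUCCESS)				\
    		_ret = (test)();	/* the case itself */	\
    	if ((teardown) != NULL)					\
    		(teardown)();		/* always clean up */	\
    	_ret == TEST_SUCCESS ? passed++ : failed++;		\
    } while (0)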